diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index d91642359..45cb436c3 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -1,6 +1,6 @@
{
"ImportPath": "github.com/hashicorp/terraform",
- "GoVersion": "go1.5.2",
+ "GoVersion": "go1.5",
"Packages": [
"github.com/hashicorp/terraform",
"github.com/hashicorp/terraform/builtin/bins/provider-atlas",
@@ -444,6 +444,10 @@
"Comment": "v1.8.6",
"Rev": "afbd495e5aaea13597b5e14fe514ddeaa4d76fc3"
},
+ {
+ "ImportPath": "github.com/golang/protobuf/proto",
+ "Rev": "34a5f244f1c01cdfee8e60324258cfbb97a42aec"
+ },
{
"ImportPath": "github.com/google/go-querystring/query",
"Rev": "2a60fc2ba6c19de80291203597d752e9ba58e4c0"
@@ -689,10 +693,6 @@
"ImportPath": "golang.org/x/crypto/ssh",
"Rev": "1f22c0103821b9390939b6776727195525381532"
},
- {
- "ImportPath": "golang.org/x/sys/unix",
- "Rev": "c65f27ffe69a43ae814a5e64c2fabb6c6312a4af"
- },
{
"ImportPath": "golang.org/x/net/context",
"Rev": "04b9de9b512f58addf28c9853d50ebef61c3953e"
@@ -701,6 +701,10 @@
"ImportPath": "golang.org/x/oauth2",
"Rev": "8a57ed94ffd43444c0879fe75701732a38afc985"
},
+ {
+ "ImportPath": "golang.org/x/sys/unix",
+ "Rev": "c65f27ffe69a43ae814a5e64c2fabb6c6312a4af"
+ },
{
"ImportPath": "google.golang.org/api/compute/v1",
"Rev": "f6ba98773d96b877b246f3a9266493dfc11e276b"
@@ -733,6 +737,10 @@
"ImportPath": "google.golang.org/api/storage/v1",
"Rev": "f6ba98773d96b877b246f3a9266493dfc11e276b"
},
+ {
+ "ImportPath": "google.golang.org/appengine",
+ "Rev": "b667a5000b082e49c6c6d16867d376a12e9490cd"
+ },
{
"ImportPath": "google.golang.org/cloud/compute/metadata",
"Rev": "fb10e8da373d97f6ba5e648299a10b3b91f14cd5"
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 000000000..1b1b1921e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,31 @@
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile
new file mode 100644
index 000000000..f1f06564a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+ go install
+
+test: install generate-test-pbs
+ go test
+
+
+generate-test-pbs:
+ make install
+ make -C testdata
+ protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto
+ make
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 000000000..6c6a7d95f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,197 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: MessageSet and RawMessage.
+
+package proto
+
+import (
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+
+ out := reflect.New(in.Type().Elem())
+ // out is empty so a merge is a deep copy.
+ mergeStruct(out.Elem(), in.Elem())
+ return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ // Explicit test prior to mergeStruct so that mistyped nils will fail
+ panic("proto: type mismatch")
+ }
+ if in.IsNil() {
+ // Merging nil into non-nil is a quiet no-op
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
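For context on how these two entry points behave from outside the package, a minimal sketch; the Example type below is a hand-written stand-in for what protoc-gen-go would generate and is shown only for illustration:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Example stands in for a generated proto2 message: pointer fields plus
// the Reset/String/ProtoMessage methods that satisfy proto.Message.
type Example struct {
	Name  *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Count *int32  `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
}

func (m *Example) Reset()         { *m = Example{} }
func (m *Example) String() string { return fmt.Sprintf("%+v", *m) }
func (*Example) ProtoMessage()    {}

func main() {
	name, count := "widget", int32(3)
	orig := &Example{Name: &name, Count: &count}

	// Clone returns an independent deep copy.
	dup := proto.Clone(orig).(*Example)
	*dup.Count = 7
	fmt.Println(*orig.Count, *dup.Count) // 3 7

	// Merge copies the fields that are set in src into dst.
	dst := &Example{}
	proto.Merge(dst, orig)
	fmt.Println(*dst.Name, *dst.Count) // widget 3
}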
+
+func mergeStruct(out, in reflect.Value) {
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i))
+ }
+
+ if emIn, ok := in.Addr().Interface().(extendableProto); ok {
+ emOut := out.Addr().Interface().(extendableProto)
+ mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+func mergeAny(out, in reflect.Value) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(in)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key))
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem())
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i))
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value))
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 000000000..312e60436
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,827 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough data.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ // x, n already 0
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
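An illustrative round trip of the varint format described above; EncodeVarint, the package-level inverse, is defined in encode.go:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 150 on the wire is 0x96 0x01: the low seven bits with the
	// continuation bit set, then the remaining high bits.
	x, n := proto.DecodeVarint([]byte{0x96, 0x01})
	fmt.Println(x, n) // 150 2

	fmt.Printf("% x\n", proto.EncodeVarint(150)) // 96 01
}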
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ // x, err already 0
+
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
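A small sketch of the zigzag mapping, which interleaves signed values (0, -1, 1, -2, 2, ... become 0, 1, 2, 3, 4, ...) so that small magnitudes stay short on the wire; it writes and then reads back through the same Buffer:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)
	b.EncodeZigzag64(uint64(int64(-3))) // zigzag(-3) = 5, a one-byte varint

	u, err := b.DecodeZigzag64()
	if err != nil {
		panic(err)
	}
	fmt.Println(int64(u)) // -3
}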
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // todo: check if can get more uses of alloc=false
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+// If the protocol buffer has extensions, and the field matches, add it as an extension.
+// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+ oi := o.index
+
+ err := o.skip(t, tag, wire)
+ if err != nil {
+ return err
+ }
+
+ if !unrecField.IsValid() {
+ return nil
+ }
+
+ ptr := structPointer_Bytes(base, unrecField)
+
+ // Append the skipped field to the struct's XXX_unrecognized bytes
+ obuf := o.buf
+
+ o.buf = *ptr
+ o.EncodeVarint(uint64(tag<<3 | wire))
+ *ptr = append(o.buf, obuf[oi:o.index]...)
+
+ o.buf = obuf
+
+ return nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+
+ var u uint64
+ var err error
+
+ switch wire {
+ case WireVarint:
+ _, err = o.DecodeVarint()
+ case WireFixed64:
+ _, err = o.DecodeFixed64()
+ case WireBytes:
+ _, err = o.DecodeRawBytes(false)
+ case WireFixed32:
+ _, err = o.DecodeFixed32()
+ case WireStartGroup:
+ for {
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ fwire := int(u & 0x7)
+ if fwire == WireEndGroup {
+ break
+ }
+ ftag := int(u >> 3)
+ err = o.skip(t, ftag, fwire)
+ if err != nil {
+ break
+ }
+ }
+ default:
+ err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+ }
+ return err
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The method should reset the receiver before
+// decoding starts. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ return UnmarshalMerge(buf, pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
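The difference between the two entry points is easiest to see with a repeated field; the Tagged type below is a hypothetical hand-written stand-in for a generated message, and error handling is elided for brevity:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Tagged stands in for a generated message with one repeated field.
type Tagged struct {
	Tags []string `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"`
}

func (m *Tagged) Reset()         { *m = Tagged{} }
func (m *Tagged) String() string { return fmt.Sprintf("%+v", *m) }
func (*Tagged) ProtoMessage()    {}

func main() {
	a, _ := proto.Marshal(&Tagged{Tags: []string{"alpha"}})
	b, _ := proto.Marshal(&Tagged{Tags: []string{"beta"}})

	out := &Tagged{}
	proto.Unmarshal(a, out) // resets out each time
	proto.Unmarshal(b, out)
	fmt.Println(out.Tags) // [beta]

	out.Reset()
	proto.UnmarshalMerge(a, out) // appends to existing data
	proto.UnmarshalMerge(b, out)
	fmt.Println(out.Tags) // [alpha beta]
}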
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+
+ err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+ if collectStats {
+ stats.Decode++
+ }
+
+ return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+ var state errorState
+ required, reqFields := prop.reqCount, uint64(0)
+
+ var err error
+ for err == nil && o.index < len(o.buf) {
+ oi := o.index
+ var u uint64
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ wire := int(u & 0x7)
+ if wire == WireEndGroup {
+ if is_group {
+ return nil // input is satisfied
+ }
+ return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+ }
+ tag := int(u >> 3)
+ if tag <= 0 {
+ return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+ }
+ fieldnum, ok := prop.decoderTags.get(tag)
+ if !ok {
+ // Maybe it's an extension?
+ if prop.extendable {
+ if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
+ if err = o.skip(st, tag, wire); err == nil {
+ ext := e.ExtensionMap()[int32(tag)] // may be missing
+ ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+ e.ExtensionMap()[int32(tag)] = ext
+ }
+ continue
+ }
+ }
+ err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+ continue
+ }
+ p := prop.Prop[fieldnum]
+
+ if p.dec == nil {
+ fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+ continue
+ }
+ dec := p.dec
+ if wire != WireStartGroup && wire != p.WireType {
+ if wire == WireBytes && p.packedDec != nil {
+ // a packable field
+ dec = p.packedDec
+ } else {
+ err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+ continue
+ }
+ }
+ decErr := dec(o, p, base)
+ if decErr != nil && !state.shouldContinue(decErr, p) {
+ err = decErr
+ }
+ if err == nil && p.Required {
+ // Successfully decoded a required field.
+ if tag <= 64 {
+ // use bitmap for fields 1-64 to catch field reuse.
+ var mask uint64 = 1 << uint64(tag-1)
+ if reqFields&mask == 0 {
+ // new required field
+ reqFields |= mask
+ required--
+ }
+ } else {
+ // This is imprecise. It can be fooled by a required field
+ // with a tag > 64 that is encoded twice; that's very rare.
+ // A fully correct implementation would require allocating
+ // a data structure, which we would like to avoid.
+ required--
+ }
+ }
+ }
+ if err == nil {
+ if is_group {
+ return io.ErrUnexpectedEOF
+ }
+ if state.err != nil {
+ return state.err
+ }
+ if required > 0 {
+ // Not enough information to determine the exact field. If we use extra
+ // CPU, we could determine the field only if the missing required field
+ // has a tag <= 64 and we check reqFields.
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ }
+ return err
+}
+
+// Individual type decoders
+// For each,
+// u is the decoded value,
+// v is a pointer to the field (pointer) in the struct
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+ boolPoolSize = 16
+ uint32PoolSize = 8
+ uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ if len(o.bools) == 0 {
+ o.bools = make([]bool, boolPoolSize)
+ }
+ o.bools[0] = u != 0
+ *structPointer_Bool(base, p.field) = &o.bools[0]
+ o.bools = o.bools[1:]
+ return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ *structPointer_BoolVal(base, p.field) = u != 0
+ return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+ return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64_Set(structPointer_Word64(base, p.field), o, u)
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+ return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_String(base, p.field) = &s
+ return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_StringVal(base, p.field) = s
+ return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ *structPointer_Bytes(base, p.field) = b
+ return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BoolSlice(base, p.field)
+ *v = append(*v, u != 0)
+ return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+ v := structPointer_BoolSlice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded bools
+
+ y := *v
+ for i := 0; i < nb; i++ {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ y = append(y, u != 0)
+ }
+
+ *v = y
+ return nil
+}
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ structPointer_Word32Slice(base, p.field).Append(uint32(u))
+ return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int32s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(uint32(u))
+ }
+ return nil
+}
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+
+ structPointer_Word64Slice(base, p.field).Append(u)
+ return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int64s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(u)
+ }
+ return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ v := structPointer_StringSlice(base, p.field)
+ *v = append(*v, s)
+ return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BytesSlice(base, p.field)
+ *v = append(*v, b)
+ return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ oi := o.index // index at the end of this map entry
+ o.index -= len(raw) // move buffer back to start of map entry
+
+ mptr := structPointer_Map(base, p.field, p.mtype) // *map[K]V
+ if mptr.Elem().IsNil() {
+ mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+ }
+ v := mptr.Elem() // map[K]V
+
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // See enc_new_map for why.
+ keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+ keybase := toStructPointer(keyptr.Addr()) // **K
+
+ var valbase structPointer
+ var valptr reflect.Value
+ switch p.mtype.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valptr = reflect.ValueOf(&dummy) // *[]byte
+ valbase = toStructPointer(valptr) // *[]byte
+ case reflect.Ptr:
+ // message; valptr is **Msg; need to allocate the intermediate pointer
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valptr.Set(reflect.New(valptr.Type().Elem()))
+ valbase = toStructPointer(valptr)
+ default:
+ // everything else
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+
+ // Decode.
+ // This parses a restricted wire format, namely the encoding of a message
+ // with two fields. See enc_new_map for the format.
+ for o.index < oi {
+ // The tagcodes for the key and value properties are always a single byte
+ // because the key and value fields have tags 1 and 2.
+ tagcode := o.buf[o.index]
+ o.index++
+ switch tagcode {
+ case p.mkeyprop.tagcode[0]:
+ if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ case p.mvalprop.tagcode[0]:
+ if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ default:
+ // TODO: Should we silently skip this instead?
+ return fmt.Errorf("proto: bad map data tag %d", raw[0])
+ }
+ }
+ keyelem, valelem := keyptr.Elem(), valptr.Elem()
+ if !keyelem.IsValid() || !valelem.IsValid() {
+ // We did not decode the key or the value in the map entry.
+ // Either way, it's an invalid map entry.
+ return fmt.Errorf("proto: bad map data: missing key/val")
+ }
+
+ v.SetMapIndex(keyelem, valelem)
+ return nil
+}
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+ return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := structPointer_Interface(bas, p.stype)
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+ v := reflect.New(p.stype)
+ bas := toStructPointer(v)
+ structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+ if is_group {
+ err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+ return err
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := v.Interface()
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 000000000..b46f76065
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,1293 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+ field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+func sizeVarint(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+func sizeFixed64(x uint64) int {
+ return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+func sizeFixed32(x uint64) int {
+ return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func sizeZigzag64(x uint64) int {
+ return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+ return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+func sizeRawBytes(b []byte) int {
+ return sizeVarint(uint64(len(b))) +
+ len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+func sizeStringBytes(s string) int {
+ return sizeVarint(uint64(len(s))) +
+ len(s)
+}
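The framing used for strings, bytes, and embedded messages is simply a varint length followed by the raw bytes; a quick sketch with the Buffer methods above, assuming the NewBuffer and Bytes helpers from lib.go in this package:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)
	b.EncodeStringBytes("hi")      // varint length 2, then 'h' 'i'
	fmt.Printf("% x\n", b.Bytes()) // 02 68 69

	s, err := b.DecodeStringBytes()
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // hi
}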
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
+func Marshal(pb Message) ([]byte, error) {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ return m.Marshal()
+ }
+ p := NewBuffer(nil)
+ err := p.Marshal(pb)
+ var state errorState
+ if err != nil && !state.shouldContinue(err, nil) {
+ return nil, err
+ }
+ if p.buf == nil && err == nil {
+ // Return a non-nil slice on success.
+ return []byte{}, nil
+ }
+ return p.buf, err
+}
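Because encoding continues past an unset required field and Marshal returns the partial buffer together with the error (see RequiredNotSetError above), callers that tolerate missing required fields often special-case it; a sketch:

package main

import (
	"log"

	"github.com/golang/protobuf/proto"
)

// send marshals pb but treats a RequiredNotSetError as a warning, since
// Marshal still returns whatever it could encode in that case.
func send(pb proto.Message) ([]byte, error) {
	data, err := proto.Marshal(pb)
	if _, missing := err.(*proto.RequiredNotSetError); missing {
		log.Printf("proto: %v (sending partial message)", err)
		return data, nil
	}
	return data, err
}

func main() {}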
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ data, err := m.Marshal()
+ if err != nil {
+ return err
+ }
+ p.buf = append(p.buf, data...)
+ return nil
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ err = p.enc_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Encode++
+ }
+
+ return err
+}
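When encoding many messages, a single Buffer can be reused so its backing slice is not reallocated each time; an illustrative sketch, assuming the Reset and Bytes helpers defined in lib.go of this package:

package main

import (
	"github.com/golang/protobuf/proto"
)

// marshalAll encodes each message with one reused Buffer and returns an
// independent copy of the bytes for each.
func marshalAll(msgs []proto.Message) ([][]byte, error) {
	b := proto.NewBuffer(nil)
	out := make([][]byte, 0, len(msgs))
	for _, m := range msgs {
		b.Reset() // keep the backing array, drop the previous contents
		if err := b.Marshal(m); err != nil {
			return nil, err
		}
		// Bytes aliases the internal slice, so copy before the next Reset.
		out = append(out, append([]byte(nil), b.Bytes()...))
	}
	return out, nil
}

func main() {}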
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+ // Can the object marshal itself? If so, Size is slow.
+ // TODO: add Size to Marshaler, or add a Sizer interface.
+ if m, ok := pb.(Marshaler); ok {
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return 0
+ }
+ if err == nil {
+ n = size_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Size++
+ }
+
+ return
+}
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := 0
+ if *v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, 1)
+ return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := word32_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := word32_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return ErrNil
+ }
+ x := word64_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return 0
+ }
+ x := word64_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := *v
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return 0
+ }
+ x := *v
+ n += len(p.tagcode)
+ n += sizeStringBytes(x)
+ return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return nil
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return ErrNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ err := o.enc_struct(p.sprop, b)
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return 0
+ }
+
+ n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ n += size_struct(p.sprop, b)
+ n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ for _, x := range s {
+ o.buf = append(o.buf, p.tagcode...)
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+ for _, x := range s {
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(l))
+ n += l // each bool takes exactly one byte
+ return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(buf, uint64(x))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ bufSize += p.valSize(uint64(x))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := s.Index(i)
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := s.Index(i)
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, uint64(s.Index(i)))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(uint64(s.Index(i)))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, s.Index(i))
+ }
+ return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ n += p.valSize(s.Index(i))
+ }
+ return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, s.Index(i))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(s.Index(i))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of slice of bytes ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return 0
+ }
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeRawBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeStringBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+ }
+ return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += len(p.tagcode)
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return errRepeatedHasNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+ err := o.enc_struct(p.sprop, b)
+
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ }
+ return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return // return size up to this point
+ }
+
+ n += size_struct(p.sprop, b)
+ }
+ return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+ v := *structPointer_ExtMap(base, p.field)
+ if err := encodeExtensionMap(v); err != nil {
+ return err
+ }
+ // Fast-path for common cases: zero or one extensions.
+ if len(v) <= 1 {
+ for _, e := range v {
+ o.buf = append(o.buf, e.enc...)
+ }
+ return nil
+ }
+
+ // Sort keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(v))
+ for k := range v {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ o.buf = append(o.buf, v[int32(k)].enc...)
+ }
+ return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+ v := *structPointer_ExtMap(base, p.field)
+ return sizeExtensionMap(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+ var state errorState // XXX: or do we need to plumb this through?
+
+ /*
+ A map defined as
+ map map_field = N;
+ is encoded in the same way as
+ message MapFieldEntry {
+ key_type key = 1;
+ value_type value = 2;
+ }
+ repeated MapFieldEntry map_field = N;
+ */
+
+ v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V
+ if v.Len() == 0 {
+ return nil
+ }
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ enc := func() error {
+ if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ keys := v.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := v.MapIndex(key)
+
+ // The only illegal map entry values are nil message pointers.
+ if val.Kind() == reflect.Ptr && val.IsNil() {
+ return errors.New("proto: map has nil element")
+ }
+
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ o.buf = append(o.buf, p.tagcode...)
+ if err := o.enc_len_thing(enc, &state); err != nil {
+ return err
+ }
+ }
+ return nil
+}
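+
+// A rough sketch of the equivalence described in the comment above, using only
+// this package's Buffer primitives: a map<string,string> entry {"k": "v"} on a
+// hypothetical field number 5 is encoded as a length-delimited MapFieldEntry
+// whose key is field 1 and whose value is field 2.
+//
+//	entry := proto.NewBuffer(nil)
+//	entry.EncodeVarint(uint64(1<<3 | proto.WireBytes)) // key, field 1
+//	entry.EncodeStringBytes("k")
+//	entry.EncodeVarint(uint64(2<<3 | proto.WireBytes)) // value, field 2
+//	entry.EncodeStringBytes("v")
+//
+//	msg := proto.NewBuffer(nil)
+//	msg.EncodeVarint(uint64(5<<3 | proto.WireBytes)) // map_field = 5
+//	msg.EncodeRawBytes(entry.Bytes())                // length-delimited entry, as enc_new_map produces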
+
+func size_new_map(p *Properties, base structPointer) int {
+ v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ n := 0
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ // Tag codes for key and val are the responsibility of the sub-sizer.
+ keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+ valsize := p.mvalprop.size(p.mvalprop, valbase)
+ entry := keysize + valsize
+ // Add on tag code and length of map entry itself.
+ n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+ }
+ return n
+}
+
+// mapEncodeScratch returns addressable reflect.Values for the map's key and
+// value types, along with structPointers suitable for passing to an encoder
+// or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T.
+
+ keycopy = reflect.New(mapType.Key()).Elem() // addressable K
+ keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+ keyptr.Set(keycopy.Addr()) //
+ keybase = toStructPointer(keyptr.Addr()) // **K
+
+ // Value types are more varied and require special handling.
+ switch mapType.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+ valbase = toStructPointer(valcopy.Addr())
+ case reflect.Ptr:
+ // message; the generated field type is map[K]*Msg (so V is *Msg),
+ // so we only need one level of indirection.
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valbase = toStructPointer(valcopy.Addr())
+ default:
+ // everything else
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+ valptr.Set(valcopy.Addr()) //
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+ return
+}
+
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+ var state errorState
+ // Encode fields in tag order so that decoders may use optimizations
+ // that depend on the ordering.
+ // https://developers.google.com/protocol-buffers/docs/encoding#order
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.enc != nil {
+ err := p.enc(o, p, base)
+ if err != nil {
+ if err == ErrNil {
+ if p.Required && state.err == nil {
+ state.err = &RequiredNotSetError{p.Name}
+ }
+ } else if err == errRepeatedHasNil {
+ // Give more context to nil values in repeated fields.
+ return errors.New("repeated field " + p.OrigName + " has nil element")
+ } else if !state.shouldContinue(err, p) {
+ return err
+ }
+ }
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ if len(v) > 0 {
+ o.buf = append(o.buf, v...)
+ }
+ }
+
+ return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.size != nil {
+ n += p.size(p, base)
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ n += len(v)
+ }
+
+ return
+}
+
+var zeroes [20]byte // longer than any conceivable sizeVarint
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+ return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+ iLen := len(o.buf)
+ o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+ iMsg := len(o.buf)
+ err := enc()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ lMsg := len(o.buf) - iMsg
+ lLen := sizeVarint(uint64(lMsg))
+ switch x := lLen - (iMsg - iLen); {
+ case x > 0: // actual length is x bytes larger than the space we reserved
+ // Move msg x bytes right.
+ o.buf = append(o.buf, zeroes[:x]...)
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ case x < 0: // actual length is x bytes smaller than the space we reserved
+ // Move msg x bytes left.
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ o.buf = o.buf[:len(o.buf)+x] // x is negative
+ }
+ // Encode the length in the reserved space.
+ o.buf = o.buf[:iLen]
+ o.EncodeVarint(uint64(lMsg))
+ o.buf = o.buf[:len(o.buf)+lMsg]
+ return state.err
+}
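+
+// Worked example of the adjustment above: four bytes are reserved for the
+// length, so if the nested encoding turns out to be 300 bytes long, its length
+// varint needs only sizeVarint(300) == 2 bytes, x == 2-4 == -2, and the message
+// is shifted two bytes to the left before the length is written in place.
+// Only a message of at least 2^28 bytes needs a length varint longer than the
+// four reserved bytes, forcing a shift to the right.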
+
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+ err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+ // Ignore unset required fields.
+ reqNotSet, ok := err.(*RequiredNotSetError)
+ if !ok {
+ return false
+ }
+ if s.err == nil {
+ if prop != nil {
+ err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
+ }
+ s.err = err
+ }
+ return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 000000000..d8673a3e9
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,256 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+// TODO: MessageSet.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN.
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal (a "bytes" field,
+ although represented by []byte, is not a repeated field)
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
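+
+// A minimal usage sketch; pb.Test and its Label field stand in for any
+// generated message type (see the example in the package comment):
+//
+//	a := &pb.Test{Label: proto.String("hello")}
+//	b := &pb.Test{Label: proto.String("hello")}
+//	proto.Equal(a, b) // true
+//	b.Label = proto.String("bye")
+//	proto.Equal(a, b) // false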
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ b1, ok := f1.Interface().(raw)
+ if ok {
+ b2 := f2.Interface().(raw)
+ // RawMessage
+ if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
+ return false
+ }
+ continue
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ if !bytes.Equal(u1, u2) {
+ return false
+ }
+
+ return true
+}
+
+// v1 and v2 are known to have the same type.
+func equalAny(v1, v2 reflect.Value) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ return equalAny(v1.Elem(), v2.Elem())
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i)) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// em1 and em2 are extension maps.
+func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ continue
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 000000000..e591ccef7
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,400 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
+
+var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base extendableProto, id int32, b []byte) {
+ base.ExtensionMap()[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ // Check the extended type.
+ if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
+ return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
+func encodeExtensionMap(m map[int32]Extension) error {
+ for k, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ p := NewBuffer(nil)
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ e.enc = p.buf
+ m[k] = e
+ }
+ return nil
+}
+
+func sizeExtensionMap(m map[int32]Extension) (n int) {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ n += props.size(props, toStructPointer(x))
+ }
+ return
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ _, ok := pb.ExtensionMap()[extension.Field]
+ return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
+ // TODO: Check types, field numbers, etc.?
+ delete(pb.ExtensionMap(), extension.Field)
+}
+
+// GetExtension parses and returns the given extension of pb.
+// If the extension is not present and has no default value, it returns ErrMissingExtension.
+func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return nil, err
+ }
+
+ emap := pb.ExtensionMap()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default is defined for the extension, ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+		// We do not need to return a pointer; we can return sf.value directly.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+		// We may have an int32 or an enum, but the underlying data is int32.
+		// Since we can't set an int32 into a non-int32 reflect.Value directly,
+		// set it as an int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ o := NewBuffer(b)
+
+ t := reflect.TypeOf(extension.ExtensionType)
+ rep := extension.repeated()
+
+ props := extensionProperties(extension)
+
+	// t is a pointer to a struct, a pointer to a basic type, or a slice.
+	// Allocate a zero value of type t and pass its address to props.dec,
+	// which interprets the *t as a *struct{ x t } whose single field
+	// receives the decoded value.
+ value := reflect.New(t).Elem()
+
+ for {
+ // Discard wire type and field number varint. It isn't needed.
+ if _, err := o.DecodeVarint(); err != nil {
+ return nil, err
+ }
+
+ if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+ return nil, err
+ }
+
+ if !rep || o.index >= len(o.buf) {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, ok := pb.(extendableProto)
+ if !ok {
+ err = errors.New("proto: not an extendable proto")
+ return
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
+ return nil
+}
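+
+// A rough sketch of the extension round trip. The names pb.MyMessage and
+// pb.E_Ext are hypothetical generated identifiers, and the extension is
+// assumed to be of type optional string:
+//
+//	m := &pb.MyMessage{}
+//	if err := proto.SetExtension(m, pb.E_Ext, proto.String("x")); err != nil {
+//		// handle err
+//	}
+//	if proto.HasExtension(m, pb.E_Ext) {
+//		v, err := proto.GetExtension(m, pb.E_Ext)
+//		if err == nil {
+//			_ = *v.(*string) // "x"
+//		}
+//	}
+//	proto.ClearExtension(m, pb.E_Ext)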
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 000000000..89ca42ad7
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,796 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+    method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+	package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // write point
+
+ // pools of basic types to amortize allocation.
+ bools []bool
+ uint32s []uint32
+ uint64s []uint64
+
+ // extra pools, only used with pointer_reflect.go
+ int32s []int32
+ int64s []int64
+ float32s []float32
+ float64s []float64
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns the enum's name, or the
+// value's decimal string if the value has no name in the map.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
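+
+// For illustration, reusing the FOO_value map from the generated-code example
+// in the package comment, both JSON representations decode to the same value:
+//
+//	v, err := proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // v == 17
+//	v, err = proto.UnmarshalJSONEnum(FOO_value, []byte(`17`), "FOO")   // v == 17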
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ if err != nil {
+ fmt.Printf("%3d: t=%3d start err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ if err != nil {
+ fmt.Printf("%3d: t=%3d end err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
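+
+// A minimal usage sketch, where msg is any generated Message value:
+//
+//	data, _ := proto.Marshal(msg)
+//	proto.NewBuffer(nil).DebugPrint("wire form of msg", data)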
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
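+
+// A minimal sketch, using the Test message from the package comment, whose
+// Type field declares a default of 77:
+//
+//	m := &pb.Test{Label: proto.String("x")} // Type is unset (nil)
+//	proto.SetDefaults(m)
+//	// *m.Type is now 77 (Default_Test_Type); Label is left untouched.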
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+	// defaults maps a protocol buffer struct type to its defaultMessage,
+	// which records the proto-declared defaults of its scalar fields and
+	// the indexes of its nested message fields.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+
+type mapKeys []reflect.Value
+
+func (s mapKeys) Len() int { return len(s) }
+func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s mapKeys) Less(i, j int) bool {
+ return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
+}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 000000000..9d912bce1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,287 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var ErrNoMessageTypeId = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and MessageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+//
+// When a proto1 proto has a field that looks like:
+// optional message info = 3;
+// the protocol compiler produces a field in the generated struct that looks like:
+// Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"`
+// The package is automatically inserted so there is no need for that proto file to
+// import this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type MessageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure MessageSet is a Message.
+var _ Message = (*MessageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *MessageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *MessageSet) Has(pb Message) bool {
+ if ms.find(pb) != nil {
+ return true
+ }
+ return false
+}
+
+func (ms *MessageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return ErrNoMessageTypeId
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *MessageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return ErrNoMessageTypeId
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *MessageSet) Reset() { *ms = MessageSet{} }
+func (ms *MessageSet) String() string { return CompactTextString(ms) }
+func (*MessageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
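+
+// Illustrative sketch, not part of the upstream file: skipVarint drops a
+// single varint from the front of a buffer. For example, the two-byte
+// varint 0x96 0x01 (value 150) followed by payload bytes yields just the
+// payload. The helper below is hypothetical.
+func exampleSkipVarint() []byte {
+	buf := []byte{0x96, 0x01, 0xde, 0xad} // varint(150) then two payload bytes
+	return skipVarint(buf)                // -> []byte{0xde, 0xad}
+}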
+
+// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
+ if err := encodeExtensionMap(m); err != nil {
+ return nil, err
+ }
+
+ // Sort extension IDs to provide a deterministic encoding.
+ // See also enc_map in encode.go.
+ ids := make([]int, 0, len(m))
+ for id := range m {
+ ids = append(ids, int(id))
+ }
+ sort.Ints(ids)
+
+ ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+ for _, id := range ids {
+ e := m[int32(id)]
+ // Remove the wire type and field number varint, as well as the length varint.
+ msg := skipVarint(skipVarint(e.enc))
+
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: Int32(int32(id)),
+ Message: msg,
+ })
+ }
+ return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
+ ms := new(MessageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ for i, id := range ids {
+ ext := m[id]
+ if i > 0 {
+ b.WriteByte(',')
+ }
+
+ msd, ok := messageSetMap[id]
+ if !ok {
+ // Unknown type; we can't render it, so skip it.
+ continue
+ }
+ fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 000000000..c68b12525
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,479 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "math"
+ "reflect"
+)
+
+// A structPointer is a pointer to a struct.
+type structPointer struct {
+ v reflect.Value
+}
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+// The reflect value must itself be a pointer to a struct.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer{v}
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p.v.IsNil()
+}
+
+// Interface returns the struct pointer as an interface value.
+func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
+ return p.v.Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// field returns the given field in the struct as a reflect value.
+func structPointer_field(p structPointer, f field) reflect.Value {
+ // Special case: an extension map entry with a value of type T
+ // passes a *T to the struct-handling code with a zero field,
+ // expecting that it will be treated as equivalent to *struct{ X T },
+ // which has the same memory layout. We have to handle that case
+ // specially, because reflect will panic if we call FieldByIndex on a
+ // non-struct.
+ if f == nil {
+ return p.v.Elem()
+ }
+
+ return p.v.Elem().FieldByIndex(f)
+}
+
+// ifield returns the given field in the struct as an interface value.
+func structPointer_ifield(p structPointer, f field) interface{} {
+ return structPointer_field(p, f).Addr().Interface()
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return structPointer_ifield(p, f).(*string)
+}
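+
+// Illustrative sketch, not part of the upstream file: in this reflect-based
+// implementation a field is the index path used with FieldByIndex, and the
+// accessors return addressable interface values. The helper below is
+// hypothetical.
+func exampleReflectField() *string {
+	type t struct{ S string }
+	v := &t{S: "hi"}
+	sf := reflect.TypeOf(*v).Field(0)
+	p := toStructPointer(reflect.ValueOf(v))
+	return structPointer_StringVal(p, toField(&sf)) // addresses v.S
+}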
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return structPointer_ifield(p, f).(*[]string)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// Map returns the reflect.Value for the address of a map field in the struct.
+func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+ return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+ v reflect.Value
+}
+
+func (p structPointerSlice) Len() int { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+ p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ float32Type = reflect.TypeOf(float32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+ v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+ return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int32Type:
+ if len(o.int32s) == 0 {
+ o.int32s = make([]int32, uint32PoolSize)
+ }
+ o.int32s[0] = int32(x)
+ p.v.Set(reflect.ValueOf(&o.int32s[0]))
+ o.int32s = o.int32s[1:]
+ return
+ case uint32Type:
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+ o.uint32s = o.uint32s[1:]
+ return
+ case float32Type:
+ if len(o.float32s) == 0 {
+ o.float32s = make([]float32, uint32PoolSize)
+ }
+ o.float32s[0] = math.Float32frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float32s[0]))
+ o.float32s = o.float32s[1:]
+ return
+ }
+
+ // must be enum
+ p.v.Set(reflect.New(t))
+ p.v.Elem().SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
+type word32Val struct {
+ v reflect.Value
+}
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ switch p.v.Type() {
+ case int32Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint32Type:
+ p.v.SetUint(uint64(x))
+ return
+ case float32Type:
+ p.v.SetFloat(float64(math.Float32frombits(x)))
+ return
+ }
+
+ // must be enum
+ p.v.SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32Val_Get(p word32Val) uint32 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Val returns a reference to an int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val{structPointer_field(p, f)}
+}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+ v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int32:
+ elem.SetInt(int64(int32(x)))
+ case reflect.Uint32:
+ elem.SetUint(uint64(x))
+ case reflect.Float32:
+ elem.SetFloat(float64(math.Float32frombits(x)))
+ }
+}
+
+func (p word32Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) word32Slice {
+ return word32Slice{structPointer_field(p, f)}
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 struct {
+ v reflect.Value
+}
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int64Type:
+ if len(o.int64s) == 0 {
+ o.int64s = make([]int64, uint64PoolSize)
+ }
+ o.int64s[0] = int64(x)
+ p.v.Set(reflect.ValueOf(&o.int64s[0]))
+ o.int64s = o.int64s[1:]
+ return
+ case uint64Type:
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint64s[0]))
+ o.uint64s = o.uint64s[1:]
+ return
+ case float64Type:
+ if len(o.float64s) == 0 {
+ o.float64s = make([]float64, uint64PoolSize)
+ }
+ o.float64s[0] = math.Float64frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float64s[0]))
+ o.float64s = o.float64s[1:]
+ return
+ }
+ panic("unreachable")
+}
+
+func word64_IsNil(p word64) bool {
+ return p.v.IsNil()
+}
+
+func word64_Get(p word64) uint64 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64{structPointer_field(p, f)}
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val struct {
+ v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ switch p.v.Type() {
+ case int64Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint64Type:
+ p.v.SetUint(x)
+ return
+ case float64Type:
+ p.v.SetFloat(math.Float64frombits(x))
+ return
+ }
+ panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val{structPointer_field(p, f)}
+}
+
+type word64Slice struct {
+ v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int64:
+		elem.SetInt(int64(x))
+	case reflect.Uint64:
+		elem.SetUint(x)
+ case reflect.Float64:
+ elem.SetFloat(float64(math.Float64frombits(x)))
+ }
+}
+
+func (p word64Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return uint64(elem.Uint())
+ case reflect.Float64:
+ return math.Float64bits(float64(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+ return word64Slice{structPointer_field(p, f)}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 000000000..48bc0fa48
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,266 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+// type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+ return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != ^field(0)
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
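+
+// Illustrative sketch, not part of the upstream file: a field is just the
+// byte offset of a struct field, so adding it to the struct pointer
+// addresses the field directly. The helper below is hypothetical.
+func exampleFieldOffset() *[]byte {
+	type t struct{ B []byte }
+	v := &t{B: []byte("hi")}
+	sf := reflect.TypeOf(*v).Field(0)
+	p := toStructPointer(reflect.ValueOf(v))
+	return structPointer_Bytes(p, toField(&sf)) // points at v.B
+}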
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Map returns the reflect.Value for the address of a map field in the struct.
+func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+ return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *v is nil.
+func word32_IsNil(p word32) bool {
+ return *p == nil
+}
+
+// Set sets *v to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ *p = &o.uint32s[0]
+ o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *v.
+func word32_Get(p word32) uint32 {
+ return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ *p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+ return *p
+}
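+
+// Illustrative sketch, not part of the upstream file: a word32Val aliases a
+// 32-bit value field, so setting it writes the field's bits directly. The
+// helper below is hypothetical.
+func exampleWord32Val() uint32 {
+	var n uint32
+	p := word32Val(&n)
+	word32Val_Set(p, 42)
+	return word32Val_Get(p) // 42
+}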
+
+// Word32Val returns the address of an int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
+func (v *word32Slice) Len() int { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+ return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 **uint64
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ *p = &o.uint64s[0]
+ o.uint64s = o.uint64s[1:]
+}
+
+func word64_IsNil(p word64) bool {
+ return *p == nil
+}
+
+func word64_Get(p word64) uint64 {
+ return **p
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val *uint64
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ *p = x
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ return *p
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64
+
+func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
+func (v *word64Slice) Len() int { return len(*v) }
+func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+
+func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
+ return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 000000000..d74844ab2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,742 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for describing the encoding and decoding properties of struct fields.
+ */
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+const startSize = 10 // initial slice/string sizes
+
+// Encoders are defined in encode.go
+// An encoder outputs the full representation of a field, including its
+// tag and encoder type.
+type encoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueEncoder encodes a single integer in a particular encoding.
+type valueEncoder func(o *Buffer, x uint64) error
+
+// Sizers are defined in encode.go
+// A sizer returns the encoded size of a field, including its tag and encoder
+// type.
+type sizer func(prop *Properties, base structPointer) int
+
+// A valueSizer returns the encoded size of a single integer in a particular
+// encoding.
+type valueSizer func(x uint64) int
+
+// Decoders are defined in decode.go
+// A decoder creates a value from its wire representation.
+// Unrecognized subelements are saved in unrec.
+type decoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueDecoder decodes a single integer in a particular encoding.
+type valueDecoder func(o *Buffer) (x uint64, err error)
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
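+
+// Illustrative sketch, not part of the upstream file: tags below
+// tagMapFastLimit are stored in the fastTags slice, while larger tags fall
+// back to the slowTags map. The helper below is hypothetical.
+func exampleTagMap() (int, bool) {
+	var tm tagMap
+	tm.put(3, 0)     // small tag: stored in fastTags
+	tm.put(5000, 1)  // tag >= tagMapFastLimit: stored in slowTags
+	return tm.get(3) // -> 0, true
+}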
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+ unrecField field // field id of the XXX_unrecognized []byte field
+ extendable bool // is this an extendable proto
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field; set for []byte only
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+ def_uint64 uint64
+
+ enc encoder
+ valEnc valueEncoder // set for bool and numeric types only
+ field field
+ tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+ tagbuf [8]byte
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
+ isMarshaler bool
+ isUnmarshaler bool
+
+ mtype reflect.Type // set for map types only
+ mkeyprop *Properties // set for map types only
+ mvalprop *Properties // set for map types only
+
+ size sizer
+ valSize valueSizer // set for bool and numeric types only
+
+ dec decoder
+ valDec valueDecoder // set for bool and numeric types only
+
+ // If this is a packable field, this will be the decoder for the packed version of the field.
+ packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+	s += ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ if p.OrigName != p.Name {
+ s += ",name=" + p.OrigName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeVarint
+ p.valDec = (*Buffer).DecodeVarint
+ p.valSize = sizeVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ p.valEnc = (*Buffer).EncodeFixed32
+ p.valDec = (*Buffer).DecodeFixed32
+ p.valSize = sizeFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ p.valEnc = (*Buffer).EncodeFixed64
+ p.valDec = (*Buffer).DecodeFixed64
+ p.valSize = sizeFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag32
+ p.valDec = (*Buffer).DecodeZigzag32
+ p.valSize = sizeZigzag32
+ case "zigzag64":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag64
+ p.valDec = (*Buffer).DecodeZigzag64
+ p.valSize = sizeZigzag64
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break
+ }
+ }
+ }
+}
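+
+// Illustrative sketch, not part of the upstream file: Parse reads the
+// comma-separated struct tag written by protoc-gen-go, for example the tag
+// of a required varint field with number 2 and original name type_id.
+// The helper below is hypothetical.
+func exampleParseTag() *Properties {
+	p := new(Properties)
+	p.Parse("varint,2,req,name=type_id")
+	// Now p.WireType == WireVarint, p.Tag == 2, p.Required == true,
+	// and p.OrigName == "type_id".
+	return p
+}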
+
+func logNoSliceEnc(t1, t2 reflect.Type) {
+ fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ p.enc = nil
+ p.dec = nil
+ p.size = nil
+
+ switch t1 := typ; t1.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+ // proto3 scalar types
+
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_proto3_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_proto3_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_proto3_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_proto3_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_proto3_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_proto3_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_proto3_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_proto3_string
+
+ case reflect.Ptr:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+ break
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_bool
+ p.dec = (*Buffer).dec_bool
+ p.size = size_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_int32
+ p.dec = (*Buffer).dec_int32
+ p.size = size_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_uint32
+ p.dec = (*Buffer).dec_int32 // can reuse
+ p.size = size_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_int64
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_int32
+ p.size = size_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_string
+ p.dec = (*Buffer).dec_string
+ p.size = size_string
+ case reflect.Struct:
+ p.stype = t1.Elem()
+ p.isMarshaler = isMarshaler(t1)
+ p.isUnmarshaler = isUnmarshaler(t1)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_struct_message
+ p.dec = (*Buffer).dec_struct_message
+ p.size = size_struct_message
+ } else {
+ p.enc = (*Buffer).enc_struct_group
+ p.dec = (*Buffer).dec_struct_group
+ p.size = size_struct_group
+ }
+ }
+
+ case reflect.Slice:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ case reflect.Bool:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_bool
+ p.size = size_slice_packed_bool
+ } else {
+ p.enc = (*Buffer).enc_slice_bool
+ p.size = size_slice_bool
+ }
+ p.dec = (*Buffer).dec_slice_bool
+ p.packedDec = (*Buffer).dec_slice_packed_bool
+ case reflect.Int32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int32
+ p.size = size_slice_packed_int32
+ } else {
+ p.enc = (*Buffer).enc_slice_int32
+ p.size = size_slice_int32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Uint32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Int64, reflect.Uint64:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_byte
+ p.dec = (*Buffer).dec_slice_byte
+ p.size = size_slice_byte
+ // This is a []byte, which is either a bytes field,
+ // or the value of a map field. In the latter case,
+ // we always encode an empty []byte, so we should not
+ // use the proto3 enc/size funcs.
+ // f == nil iff this is the key/value of a map field.
+ if p.proto3 && f != nil {
+ p.enc = (*Buffer).enc_proto3_slice_byte
+ p.size = size_proto3_slice_byte
+ }
+ case reflect.Float32, reflect.Float64:
+ switch t2.Bits() {
+ case 32:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case 64:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ }
+ case reflect.String:
+ p.enc = (*Buffer).enc_slice_string
+ p.dec = (*Buffer).dec_slice_string
+ p.size = size_slice_string
+ case reflect.Ptr:
+ switch t3 := t2.Elem(); t3.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
+ break
+ case reflect.Struct:
+ p.stype = t2.Elem()
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_slice_struct_message
+ p.dec = (*Buffer).dec_slice_struct_message
+ p.size = size_slice_struct_message
+ } else {
+ p.enc = (*Buffer).enc_slice_struct_group
+ p.dec = (*Buffer).dec_slice_struct_group
+ p.size = size_slice_struct_group
+ }
+ }
+ case reflect.Slice:
+ switch t2.Elem().Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
+ break
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_slice_byte
+ p.dec = (*Buffer).dec_slice_slice_byte
+ p.size = size_slice_slice_byte
+ }
+ }
+
+ case reflect.Map:
+ p.enc = (*Buffer).enc_new_map
+ p.dec = (*Buffer).dec_new_map
+ p.size = size_new_map
+
+ p.mtype = t1
+ p.mkeyprop = &Properties{}
+ p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.mvalprop = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+
+ // precalculate tag code
+ wire := p.WireType
+ if p.Packed {
+ wire = WireBytes
+ }
+ x := uint32(p.Tag)<<3 | uint32(wire)
+ i := 0
+ for i = 0; x > 127; i++ {
+ p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ p.tagbuf[i] = uint8(x)
+ p.tagcode = p.tagbuf[0 : i+1]
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
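+
+// Illustrative sketch, not part of the upstream file: the precalculated
+// tagcode is simply the varint encoding of (Tag<<3 | WireType). For tag 3
+// with wire type WireBytes that is the single byte 0x1a. The helper below
+// is hypothetical.
+func exampleTagcode() []byte {
+	return EncodeVarint(uint64(3)<<3 | uint64(WireBytes)) // -> []byte{0x1a}
+}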
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isMarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isMarshaler")
+ }
+ return t.Implements(marshalerType)
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isUnmarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isUnmarshaler")
+ }
+ return t.Implements(unmarshalerType)
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if f != nil {
+ p.field = toField(f)
+ }
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
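+
+// Illustrative sketch, not part of the upstream file: properties are cached
+// per struct type, so repeated lookups return the same *StructProperties.
+// MessageSet (defined in message_set.go) is used here only as a convenient
+// in-package message type; the helper is hypothetical.
+func exampleGetProperties() bool {
+	t := reflect.TypeOf(MessageSet{})
+	return GetProperties(t) == GetProperties(t) // second call hits the cache
+}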
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
+ prop.unrecField = invalidField
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ if f.Name == "XXX_extensions" { // special case
+ p.enc = (*Buffer).enc_map
+ p.dec = nil // not needed
+ p.size = size_map
+ }
+ if f.Name == "XXX_unrecognized" { // special case
+ prop.unrecField = toField(&f)
+ }
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") {
+ fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// Return the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+ if len(x) != 1 {
+ fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+ return nil
+ }
+ prop := GetProperties(t)
+ return prop.Prop[x[0]]
+}
+
+// Get the address and type of a pointer to a struct from an interface.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+ if pb == nil {
+ err = ErrNil
+ return
+ }
+ // get the reflect type of the pointer to the struct.
+ t = reflect.TypeOf(pb)
+ // get the address of the struct.
+ value := reflect.ValueOf(pb)
+ b = toStructPointer(value)
+ return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+}
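+
+// Illustrative sketch, not part of the upstream file: generated code calls
+// RegisterEnum once per enum type with its name and value maps. The enum
+// name and values used here are hypothetical.
+func exampleRegisterEnum() {
+	RegisterEnum("example.Colour", map[int32]string{0: "RED"}, map[string]int32{"RED": 0})
+}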
diff --git a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go
new file mode 100644
index 000000000..37c778209
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go
@@ -0,0 +1,122 @@
+// Code generated by protoc-gen-go.
+// source: proto3_proto/proto3.proto
+// DO NOT EDIT!
+
+/*
+Package proto3_proto is a generated protocol buffer package.
+
+It is generated from these files:
+ proto3_proto/proto3.proto
+
+It has these top-level messages:
+ Message
+ Nested
+ MessageWithMap
+*/
+package proto3_proto
+
+import proto "github.com/golang/protobuf/proto"
+import testdata "github.com/golang/protobuf/proto/testdata"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+
+type Message_Humour int32
+
+const (
+ Message_UNKNOWN Message_Humour = 0
+ Message_PUNS Message_Humour = 1
+ Message_SLAPSTICK Message_Humour = 2
+ Message_BILL_BAILEY Message_Humour = 3
+)
+
+var Message_Humour_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "PUNS",
+ 2: "SLAPSTICK",
+ 3: "BILL_BAILEY",
+}
+var Message_Humour_value = map[string]int32{
+ "UNKNOWN": 0,
+ "PUNS": 1,
+ "SLAPSTICK": 2,
+ "BILL_BAILEY": 3,
+}
+
+func (x Message_Humour) String() string {
+ return proto.EnumName(Message_Humour_name, int32(x))
+}
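+
+// Illustrative sketch, not part of the upstream file: the name map above
+// drives String(), so Message_SLAPSTICK renders as "SLAPSTICK". The helper
+// below is hypothetical.
+func exampleHumourString() string {
+	return Message_SLAPSTICK.String() // "SLAPSTICK"
+}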
+
+type Message struct {
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"`
+ HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm" json:"height_in_cm,omitempty"`
+ Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
+ ResultCount int64 `protobuf:"varint,7,opt,name=result_count" json:"result_count,omitempty"`
+ TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman" json:"true_scotsman,omitempty"`
+ Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"`
+ Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"`
+ Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"`
+ Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field" json:"proto2_field,omitempty"`
+ Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *Message) Reset() { *m = Message{} }
+func (m *Message) String() string { return proto.CompactTextString(m) }
+func (*Message) ProtoMessage() {}
+
+func (m *Message) GetNested() *Nested {
+ if m != nil {
+ return m.Nested
+ }
+ return nil
+}
+
+func (m *Message) GetTerrain() map[string]*Nested {
+ if m != nil {
+ return m.Terrain
+ }
+ return nil
+}
+
+func (m *Message) GetProto2Field() *testdata.SubDefaults {
+ if m != nil {
+ return m.Proto2Field
+ }
+ return nil
+}
+
+func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults {
+ if m != nil {
+ return m.Proto2Value
+ }
+ return nil
+}
+
+type Nested struct {
+ Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"`
+}
+
+func (m *Nested) Reset() { *m = Nested{} }
+func (m *Nested) String() string { return proto.CompactTextString(m) }
+func (*Nested) ProtoMessage() {}
+
+type MessageWithMap struct {
+ ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *MessageWithMap) Reset() { *m = MessageWithMap{} }
+func (m *MessageWithMap) String() string { return proto.CompactTextString(m) }
+func (*MessageWithMap) ProtoMessage() {}
+
+func (m *MessageWithMap) GetByteMapping() map[bool][]byte {
+ if m != nil {
+ return m.ByteMapping
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value)
+}
diff --git a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto
new file mode 100644
index 000000000..e2311d929
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto
@@ -0,0 +1,68 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2014 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+import "testdata/test.proto";
+
+package proto3_proto;
+
+message Message {
+ enum Humour {
+ UNKNOWN = 0;
+ PUNS = 1;
+ SLAPSTICK = 2;
+ BILL_BAILEY = 3;
+ }
+
+ string name = 1;
+ Humour hilarity = 2;
+ uint32 height_in_cm = 3;
+ bytes data = 4;
+ int64 result_count = 7;
+ bool true_scotsman = 8;
+ float score = 9;
+
+ repeated uint64 key = 5;
+ Nested nested = 6;
+
+ map<string, Nested> terrain = 10;
+ testdata.SubDefaults proto2_field = 11;
+ map<string, testdata.SubDefaults> proto2_value = 13;
+}
+
+message Nested {
+ string bunny = 1;
+}
+
+message MessageWithMap {
+ map<bool, bytes> byte_mapping = 1;
+}
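
For orientation, the three `map` fields above correspond one-to-one to the Go map fields in the generated struct earlier in this diff (`Terrain`, `Proto2Value`, `ByteMapping`). A minimal sketch of populating one of them, assuming the generated `proto3_proto` test package is importable at its vendored path:

```go
package main

import (
	"fmt"

	pb "github.com/golang/protobuf/proto/proto3_proto" // import path assumed from this vendor tree
)

func main() {
	// map<string, Nested> terrain = 10 becomes map[string]*Nested in Go.
	m := &pb.Message{
		Name: "example",
		Terrain: map[string]*pb.Nested{
			"highlands": &pb.Nested{Bunny: "wild"},
		},
	}
	fmt.Println(len(m.GetTerrain())) // 1
}
```
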
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 000000000..4ad159058
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,792 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "os"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ gtNewline = []byte(">\n")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Printf("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+var (
+ messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem()
+)
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+ Bytes() []byte
+}
+
+func writeStruct(w *textWriter, sv reflect.Value) error {
+ if sv.Type() == messageSetType {
+ return writeMessageSet(w, sv.Addr().Interface().(*MessageSet))
+ }
+
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys() // sorted below for deterministic output
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ switch fv.Kind() {
+ case reflect.Bool:
+ if !fv.Bool() {
+ continue
+ }
+ case reflect.Int32, reflect.Int64:
+ if fv.Int() == 0 {
+ continue
+ }
+ case reflect.Uint32, reflect.Uint64:
+ if fv.Uint() == 0 {
+ continue
+ }
+ case reflect.Float32, reflect.Float64:
+ if fv.Float() == 0 {
+ continue
+ }
+ case reflect.String:
+ if fv.String() == "" {
+ continue
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if b, ok := fv.Interface().(raw); ok {
+ if err := writeRaw(w, b.Bytes()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if pv.Type().Implements(extendableProtoType) {
+ if err := writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, b); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Interface().([]byte))); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else if err := writeStruct(w, v); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
+
+func writeMessageSet(w *textWriter, ms *MessageSet) error {
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ if msd, ok := messageSetMap[id]; ok {
+ // Known message set type.
+ if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil {
+ return err
+ }
+ w.indent()
+
+ pb := reflect.New(msd.t.Elem())
+ if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil {
+ if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil {
+ return err
+ }
+ } else {
+ if err := writeStruct(w, pb.Elem()); err != nil {
+ return err
+ }
+ }
+ } else {
+ // Unknown type.
+ if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil {
+ return err
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, item.Message); err != nil {
+ return err
+ }
+ }
+ w.unindent()
+ if _, err := w.Write(gtNewline); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, err := fmt.Fprintf(w, "/* %v */\n", err)
+ return err
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, err := w.Write(endBraceNewline); err != nil {
+ return err
+ }
+ continue
+ }
+ if _, err := fmt.Fprint(w, tag); err != nil {
+ return err
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err = w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep := pv.Interface().(extendableProto)
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ m := ep.ExtensionMap()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ if _, err := fmt.Fprintln(os.Stderr, "proto: failed getting extension: ", err); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+func marshalText(w io.Writer, pb Message, compact bool) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte(""))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: compact,
+ }
+
+ if tm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error {
+ return marshalText(w, pb, false)
+}
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string {
+ var buf bytes.Buffer
+ marshalText(&buf, pb, false)
+ return buf.String()
+}
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string {
+ var buf bytes.Buffer
+ marshalText(&buf, pb, true)
+ return buf.String()
+}
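
As a usage sketch (not part of the vendored change): the two public entry points above produce the indented and the one-line renderings respectively. This assumes the `proto3_proto` test package from earlier in this diff is importable at its vendored path.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/proto3_proto" // import path assumed
)

func main() {
	m := &pb.Message{
		Name:   "Rob",
		Score:  4.2,
		Nested: &pb.Nested{Bunny: "Flopsy"},
	}
	fmt.Print(proto.MarshalTextString(m))   // multi-line: name, score, nested < ... >
	fmt.Println(proto.CompactTextString(m)) // same content on a single line
}
```
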
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 000000000..5c466d54a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,772 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %v", p.s[0:i+1])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+ errBadHex = errors.New("proto: bad hexadecimal")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ base := 8
+ ss := s[:2]
+ s = s[2:]
+ if r == 'x' || r == 'X' {
+ base = 16
+ } else {
+ ss = string(r) + ss
+ }
+ i, err := strconv.ParseUint(ss, base, 8)
+ if err != nil {
+ return "", "", err
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'u', 'U':
+ n := 4
+ if r == 'U' {
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
+ }
+
+ bs := make([]byte, n/2)
+ for i := 0; i < n; i += 2 {
+ a, ok1 := unhex(s[i])
+ b, ok2 := unhex(s[i+1])
+ if !ok1 || !ok2 {
+ return "", "", errBadHex
+ }
+ bs[i/2] = a<<4 | b
+ }
+ s = s[n:]
+ return string(bs), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Adapted from src/pkg/strconv/quote.go.
+func unhex(b byte) (v byte, ok bool) {
+ switch {
+ case '0' <= b && b <= '9':
+ return b - '0', true
+ case 'a' <= b && b <= 'f':
+ return b - 'a' + 10, true
+ case 'A' <= b && b <= 'F':
+ return b - 'A' + 10, true
+ }
+ return 0, false
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && p.cur.value[0] == '"' {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || p.s[0] != '"' {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) {
+ sprops := GetProperties(st)
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ reqCount := GetProperties(st).reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]".
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == tok.value {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", tok.value)
+ }
+ // Check the extension terminator.
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != "]" {
+ return p.errorf("unrecognized extension terminator %q", tok.value)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(extendableProto)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ } else {
+ // This is a normal, non-extension field.
+ name := tok.value
+ fi, props, ok := structFieldByName(st, name)
+ if !ok {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ dst := sv.Field(fi)
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // Technically the "key" and "value" could come in any order,
+ // but in practice they won't.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ if err := p.consumeToken("key"); err != nil {
+ return err
+ }
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken("value"); err != nil {
+ return err
+ }
+ if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken(terminator); err != nil {
+ return err
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, st.Field(fi).Type); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ } else if props.Required {
+ reqCount--
+ }
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field. May already exist.
+ flen := fv.Len()
+ if flen == fv.Cap() {
+ nav := reflect.MakeSlice(at, flen, 2*flen+1)
+ reflect.Copy(nav, fv)
+ fv.Set(nav)
+ }
+ fv.SetLen(flen + 1)
+
+ // Read one.
+ p.back()
+ return p.readAny(fv.Index(flen), props)
+ case reflect.Bool:
+ // Either "true", "false", 1 or 0.
+ switch tok.value {
+ case "true", "1":
+ fv.SetBool(true)
+ return nil
+ case "false", "0":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ err := um.UnmarshalText([]byte(s))
+ return err
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
+ return pe
+ }
+ return nil
+}
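
And the reverse direction, a hedged round-trip sketch: UnmarshalText parses hand-written text format back into the same message type (same assumed test package as above).

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/proto3_proto" // import path assumed
)

func main() {
	const text = `name: "Rob" score: 4.2 nested: < bunny: "Flopsy" >`
	var m pb.Message
	if err := proto.UnmarshalText(text, &m); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(m.GetNested().Bunny) // Flopsy
}
```
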
diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml
new file mode 100644
index 000000000..bfa8658a4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.4
+
+install:
+ - export GOPATH="$HOME/gopath"
+ - mkdir -p "$GOPATH/src/google.golang.org"
+ - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/google.golang.org/appengine"
+ - go get -v -t -d google.golang.org/appengine/...
+
+script:
+ - go test -v google.golang.org/appengine/...
+ - go test -v -race google.golang.org/appengine/...
diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/google.golang.org/appengine/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md
new file mode 100644
index 000000000..962ebf07c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/README.md
@@ -0,0 +1,70 @@
+# Go App Engine for Managed VMs
+
+[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine)
+
+This repository supports the Go runtime for Managed VMs on App Engine.
+It provides APIs for interacting with App Engine services.
+Its canonical import path is `google.golang.org/appengine`.
+
+See https://cloud.google.com/appengine/docs/go/managed-vms/
+for more information.
+
+## Directory structure
+The top level directory of this repository is the `appengine` package. It
+contains the
+basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API
+packages are in subdirectories (e.g. `datastore`).
+
+There is an `internal` subdirectory that contains service protocol buffers,
+plus packages required for connectivity to make API calls. App Engine apps
+should not directly import any package under `internal`.
+
+## Updating a Go App Engine app
+
+This section describes how to update a traditional Go App Engine app to run on Managed VMs.
+
+### 1. Update YAML files
+
+The `app.yaml` file (and YAML files for modules) should have these new lines added:
+```
+vm: true
+manual_scaling:
+ instances: 1
+```
+See https://cloud.google.com/appengine/docs/go/modules/#Go_Instance_scaling_and_class for details.
+
+### 2. Update import paths
+
+The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`.
+You will need to update your code to use import paths starting with that; for instance,
+code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`.
+You can do that manually, or by running this command to recursively update all Go source files in the current directory:
+(may require GNU sed)
+```
+sed -i '/"appengine/{s,"appengine,"google.golang.org/appengine,;s,appengine_,appengine/,}' \
+ $(find . -name '*.go')
+```
+
+### 3. Update code using deprecated, removed or modified APIs
+
+Most App Engine services are available with exactly the same API.
+A few APIs were cleaned up, and some are not available yet.
+This list summarises the differences:
+
+* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`.
+* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`.
+* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead (see the sketch after this list).
+* `appengine.Datacenter` now takes a `context.Context` argument.
+* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels.
+* `search.FieldLoadSaver` now handles document metadata.
+* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the
+ `context.Context` instead.
+* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been
+ deprecated and unused for a long time.
+* `appengine/aetest`, `appengine/blobstore`, `appengine/cloudsql`
+ and `appengine/runtime` have not been ported yet.
+* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature.
+ Use `appengine.ModuleHostname` and `appengine.ModuleName` instead.
+* `appengine.IsCapabilityDisabled` and `appengine/capability` are obsolete.
+* Most of `appengine/file` is deprecated. Use [Google Cloud Storage](https://godoc.org/google.golang.org/cloud/storage) instead.
+* `appengine/socket` is deprecated. Use the standard `net` package instead.
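
Of the changes listed above, the removal of `appengine.Timeout` is the one most existing handlers hit first. A minimal sketch of the replacement, with the handler name and deadline chosen purely for illustration:

```go
package app

import (
	"net/http"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/appengine"
)

// timeoutHandler replaces the classic-SDK pattern
//   c = appengine.Timeout(appengine.NewContext(r), 5*time.Second)
// with a deadline carried by the context itself.
func timeoutHandler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	_ = ctx // pass ctx to datastore, urlfetch, log, ... calls
	w.WriteHeader(http.StatusNoContent)
}
```
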
diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go
new file mode 100644
index 000000000..0e2b329e0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine.go
@@ -0,0 +1,57 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package appengine provides basic functionality for Google App Engine.
+//
+// For more information on how to write Go apps for Google App Engine, see:
+// https://cloud.google.com/appengine/docs/go/
+package appengine
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// IsDevAppServer reports whether the App Engine app is running in the
+// development App Server.
+func IsDevAppServer() bool {
+ return internal.IsDevAppServer()
+}
+
+// NewContext returns a context for an in-flight HTTP request.
+// This function is cheap.
+func NewContext(req *http.Request) context.Context {
+ return WithContext(context.Background(), req)
+}
+
+// WithContext returns a copy of the parent context
+// and associates it with an in-flight HTTP request.
+// This function is cheap.
+func WithContext(parent context.Context, req *http.Request) context.Context {
+ return internal.WithContext(parent, req)
+}
+
+// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call.
+
+// TODO(dsymonds): Add BackgroundContext function?
+
+// BlobKey is a key for a blobstore blob.
+//
+// Conceptually, this type belongs in the blobstore package, but it lives in
+// the appengine package to avoid a circular dependency: blobstore depends on
+// datastore, and datastore needs to refer to the BlobKey type.
+type BlobKey string
+
+// GeoPoint represents a location as latitude/longitude in degrees.
+type GeoPoint struct {
+ Lat, Lng float64
+}
+
+// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
+func (g GeoPoint) Valid() bool {
+ return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
+}
diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go
new file mode 100644
index 000000000..41f559b26
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine_vm.go
@@ -0,0 +1,21 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package appengine
+
+import "google.golang.org/appengine/internal"
+
+// The comment below must not be changed.
+// It is used by go-app-builder to recognise that this package has
+// the Main function to use in the synthetic main.
+// The gophers party all night; the rabbits provide the beats.
+
+// Main installs the health checker and creates a server listening on port
+// "PORT" if set in the environment or on port 8080.
+// It uses the default http handler and never returns.
+func Main() {
+ internal.Main()
+}
diff --git a/vendor/google.golang.org/appengine/channel/channel.go b/vendor/google.golang.org/appengine/channel/channel.go
new file mode 100644
index 000000000..004f5ddc8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/channel/channel.go
@@ -0,0 +1,83 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package channel implements the server side of App Engine's Channel API.
+
+Create creates a new channel associated with the given clientID,
+which must be unique to the client that will use the returned token.
+
+ token, err := channel.Create(c, "player1")
+ if err != nil {
+ // handle error
+ }
+ // return token to the client in an HTTP response
+
+Send sends a message to the client over the channel identified by clientID.
+
+ channel.Send(c, "player1", "Game over!")
+*/
+package channel
+
+import (
+ "encoding/json"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ basepb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/channel"
+)
+
+// Create creates a channel and returns a token for use by the client.
+// The clientID is an application-provided string used to identify the client.
+func Create(c context.Context, clientID string) (token string, err error) {
+ req := &pb.CreateChannelRequest{
+ ApplicationKey: &clientID,
+ }
+ resp := &pb.CreateChannelResponse{}
+ err = internal.Call(c, service, "CreateChannel", req, resp)
+ token = resp.GetToken()
+ return token, remapError(err)
+}
+
+// Send sends a message on the channel associated with clientID.
+func Send(c context.Context, clientID, message string) error {
+ req := &pb.SendMessageRequest{
+ ApplicationKey: &clientID,
+ Message: &message,
+ }
+ resp := &basepb.VoidProto{}
+ return remapError(internal.Call(c, service, "SendChannelMessage", req, resp))
+}
+
+// SendJSON is a helper function that sends a JSON-encoded value
+// on the channel associated with clientID.
+func SendJSON(c context.Context, clientID string, value interface{}) error {
+ m, err := json.Marshal(value)
+ if err != nil {
+ return err
+ }
+ return Send(c, clientID, string(m))
+}
+
+// remapError fixes any APIError referencing "xmpp" into one referencing "channel".
+func remapError(err error) error {
+ if e, ok := err.(*internal.APIError); ok {
+ if e.Service == "xmpp" {
+ e.Service = "channel"
+ }
+ }
+ return err
+}
+
+var service = "xmpp" // prod
+
+func init() {
+ if appengine.IsDevAppServer() {
+ service = "channel" // dev
+ }
+ internal.RegisterErrorCodeMap("channel", pb.ChannelServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go b/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go
new file mode 100644
index 000000000..11d5fb571
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go
@@ -0,0 +1,338 @@
+// Program aebundler turns a Go app into a fully self-contained tar file.
+// The app and its subdirectories (if any) are placed under "."
+// and the dependencies from $GOPATH are placed under ./_gopath/src.
+// A main func is synthesized if one does not exist.
+//
+// A sample Dockerfile to be used with this bundler could look like this:
+// FROM gcr.io/google_appengine/go-compat
+// ADD . /app
+// RUN GOPATH=/app/_gopath go build -tags appenginevm -o /app/_ah/exe
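+//
+// A typical invocation might look like the following (the file and directory
+// names are illustrative; see the flags declared below for all options):
+//	aebundler -o app.tar -root ./myapp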
+package main
+
+import (
+ "archive/tar"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+var (
+ output = flag.String("o", "", "name of output tar file or '-' for stdout")
+ rootDir = flag.String("root", ".", "directory name of application root")
+ vm = flag.Bool("vm", true, "bundle a Managed VM app")
+
+ skipFiles = map[string]bool{
+ ".git": true,
+ ".gitconfig": true,
+ ".hg": true,
+ ".travis.yml": true,
+ }
+)
+
+const (
+ newMain = `package main
+import "google.golang.org/appengine"
+func main() {
+ appengine.Main()
+}
+`
+)
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ fmt.Fprintf(os.Stderr, "\t%s -o \tBundle app to named tar file or stdout\n", os.Args[0])
+ fmt.Fprintf(os.Stderr, "\noptional arguments:\n")
+ flag.PrintDefaults()
+}
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+
+ var tags []string
+ if *vm {
+ tags = append(tags, "appenginevm")
+ } else {
+ tags = append(tags, "appengine")
+ }
+
+ tarFile := *output
+ if tarFile == "" {
+ usage()
+ errorf("Required -o flag not specified.")
+ }
+
+ app, err := analyze(tags)
+ if err != nil {
+ errorf("Error analyzing app: %v", err)
+ }
+ if err := app.bundle(tarFile); err != nil {
+ errorf("Unable to bundle app: %v", err)
+ }
+}
+
+// errorf prints the error message and exits.
+func errorf(format string, a ...interface{}) {
+ fmt.Fprintf(os.Stderr, "aebundler: "+format+"\n", a...)
+ os.Exit(1)
+}
+
+type app struct {
+ hasMain bool
+ appFiles []string
+ imports map[string]string
+}
+
+// analyze checks the app for building with the given build tags and returns hasMain,
+// app files, and a map of full directory import names to original import names.
+func analyze(tags []string) (*app, error) {
+ ctxt := buildContext(tags)
+ hasMain, appFiles, err := checkMain(ctxt)
+ if err != nil {
+ return nil, err
+ }
+ gopath := filepath.SplitList(ctxt.GOPATH)
+ im, err := imports(ctxt, *rootDir, gopath)
+ return &app{
+ hasMain: hasMain,
+ appFiles: appFiles,
+ imports: im,
+ }, err
+}
+
+// buildContext returns the context for building the source.
+func buildContext(tags []string) *build.Context {
+ return &build.Context{
+ GOARCH: build.Default.GOARCH,
+ GOOS: build.Default.GOOS,
+ GOROOT: build.Default.GOROOT,
+ GOPATH: build.Default.GOPATH,
+ Compiler: build.Default.Compiler,
+ BuildTags: append(build.Default.BuildTags, tags...),
+ }
+}
+
+// bundle bundles the app into the named tarFile ("-"==stdout).
+func (s *app) bundle(tarFile string) (err error) {
+ var out io.Writer
+ if tarFile == "-" {
+ out = os.Stdout
+ } else {
+ f, err := os.Create(tarFile)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := f.Close(); err == nil {
+ err = cerr
+ }
+ }()
+ out = f
+ }
+ tw := tar.NewWriter(out)
+
+ for srcDir, importName := range s.imports {
+ dstDir := "_gopath/src/" + importName
+ if err = copyTree(tw, dstDir, srcDir); err != nil {
+ return fmt.Errorf("unable to copy directory %v to %v: %v", srcDir, dstDir, err)
+ }
+ }
+ if err := copyTree(tw, ".", *rootDir); err != nil {
+ return fmt.Errorf("unable to copy root directory to /app: %v", err)
+ }
+ if !s.hasMain {
+ if err := synthesizeMain(tw, s.appFiles); err != nil {
+ return fmt.Errorf("unable to synthesize new main func: %v", err)
+ }
+ }
+
+ if err := tw.Close(); err != nil {
+ return fmt.Errorf("unable to close tar file %v: %v", tarFile, err)
+ }
+ return nil
+}
+
+// synthesizeMain generates a new main func and writes it to the tarball.
+func synthesizeMain(tw *tar.Writer, appFiles []string) error {
+ appMap := make(map[string]bool)
+ for _, f := range appFiles {
+ appMap[f] = true
+ }
+ var f string
+ for i := 0; i < 100; i++ {
+ f = fmt.Sprintf("app_main%d.go", i)
+ if !appMap[filepath.Join(*rootDir, f)] {
+ break
+ }
+ }
+ if appMap[filepath.Join(*rootDir, f)] {
+ return fmt.Errorf("unable to find unique name for %v", f)
+ }
+ hdr := &tar.Header{
+ Name: f,
+ Mode: 0644,
+ Size: int64(len(newMain)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return fmt.Errorf("unable to write header for %v: %v", f, err)
+ }
+ if _, err := tw.Write([]byte(newMain)); err != nil {
+ return fmt.Errorf("unable to write %v to tar file: %v", f, err)
+ }
+ return nil
+}
+
+// imports returns a map of all import directories (recursively) used by the app.
+// The return value maps full directory names to original import names.
+func imports(ctxt *build.Context, srcDir string, gopath []string) (map[string]string, error) {
+ pkg, err := ctxt.ImportDir(srcDir, 0)
+ if err != nil {
+ return nil, fmt.Errorf("unable to analyze source: %v", err)
+ }
+
+ // Resolve all non-standard-library imports
+ result := make(map[string]string)
+ for _, v := range pkg.Imports {
+ if !strings.Contains(v, ".") {
+ continue
+ }
+ src, err := findInGopath(v, gopath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to find import %v in gopath %v: %v", v, gopath, err)
+ }
+ result[src] = v
+ im, err := imports(ctxt, src, gopath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse package %v: %v", src, err)
+ }
+ for k, v := range im {
+ result[k] = v
+ }
+ }
+ return result, nil
+}
+
+// findInGopath searches the gopath for the named import directory.
+func findInGopath(dir string, gopath []string) (string, error) {
+ for _, v := range gopath {
+ dst := filepath.Join(v, "src", dir)
+ if _, err := os.Stat(dst); err == nil {
+ return dst, nil
+ }
+ }
+ return "", fmt.Errorf("unable to find package %v in gopath %v", dir, gopath)
+}
+
+// copyTree copies srcDir to tar file dstDir, ignoring skipFiles.
+func copyTree(tw *tar.Writer, dstDir, srcDir string) error {
+ entries, err := ioutil.ReadDir(srcDir)
+ if err != nil {
+ return fmt.Errorf("unable to read dir %v: %v", srcDir, err)
+ }
+ for _, entry := range entries {
+ n := entry.Name()
+ if skipFiles[n] {
+ continue
+ }
+ s := filepath.Join(srcDir, n)
+ d := filepath.Join(dstDir, n)
+ if entry.IsDir() {
+ if err := copyTree(tw, d, s); err != nil {
+ return fmt.Errorf("unable to copy dir %v to %v: %v", s, d, err)
+ }
+ continue
+ }
+ if err := copyFile(tw, d, s); err != nil {
+ return fmt.Errorf("unable to copy dir %v to %v: %v", s, d, err)
+ }
+ }
+ return nil
+}
+
+// copyFile copies src to tar file dst.
+func copyFile(tw *tar.Writer, dst, src string) error {
+ s, err := os.Open(src)
+ if err != nil {
+ return fmt.Errorf("unable to open %v: %v", src, err)
+ }
+ defer s.Close()
+ fi, err := s.Stat()
+ if err != nil {
+ return fmt.Errorf("unable to stat %v: %v", src, err)
+ }
+
+ hdr, err := tar.FileInfoHeader(fi, dst)
+ if err != nil {
+ return fmt.Errorf("unable to create tar header for %v: %v", dst, err)
+ }
+ hdr.Name = dst
+ if err := tw.WriteHeader(hdr); err != nil {
+ return fmt.Errorf("unable to write header for %v: %v", dst, err)
+ }
+ _, err = io.Copy(tw, s)
+ if err != nil {
+ return fmt.Errorf("unable to copy %v to %v: %v", src, dst, err)
+ }
+ return nil
+}
+
+// checkMain verifies that there is a single "main" function.
+// It also returns a list of all Go source files in the app.
+func checkMain(ctxt *build.Context) (bool, []string, error) {
+ pkg, err := ctxt.ImportDir(*rootDir, 0)
+ if err != nil {
+ return false, nil, fmt.Errorf("unable to analyze source: %v", err)
+ }
+ if !pkg.IsCommand() {
+ errorf("Your app's package needs to be changed from %q to \"main\".\n", pkg.Name)
+ }
+ // Search for a "func main"
+ var hasMain bool
+ var appFiles []string
+ for _, f := range pkg.GoFiles {
+ n := filepath.Join(*rootDir, f)
+ appFiles = append(appFiles, n)
+ if hasMain, err = readFile(n); err != nil {
+ return false, nil, fmt.Errorf("error parsing %q: %v", n, err)
+ }
+ }
+ return hasMain, appFiles, nil
+}
+
+// isMain returns whether the given function declaration is a main function.
+// Such a function must be called "main", not have a receiver, and have no arguments or return types.
+func isMain(f *ast.FuncDecl) bool {
+ ft := f.Type
+ return f.Name.Name == "main" && f.Recv == nil && ft.Params.NumFields() == 0 && ft.Results.NumFields() == 0
+}
+
+// readFile reads and parses the Go source code file and returns whether it has a main function.
+func readFile(filename string) (hasMain bool, err error) {
+ var src []byte
+ src, err = ioutil.ReadFile(filename)
+ if err != nil {
+ return
+ }
+ fset := token.NewFileSet()
+ file, err := parser.ParseFile(fset, filename, src, 0)
+ for _, decl := range file.Decls {
+ funcDecl, ok := decl.(*ast.FuncDecl)
+ if !ok {
+ continue
+ }
+ if !isMain(funcDecl) {
+ continue
+ }
+ hasMain = true
+ break
+ }
+ return
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go b/vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go
new file mode 100644
index 000000000..65c4d7b7b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go
@@ -0,0 +1,260 @@
+// Program aedeploy assists with deploying Go Managed VM apps to production.
+// A temporary directory is created; the app, its subdirectories, and all its
+// dependencies from $GOPATH are copied into the directory; then the app
+// is deployed to production with the provided command.
+//
+// The app must be in "package main".
+//
+// This command must be issued from within the root directory of the app
+// (where the app.yaml file is located).
+//
+// A sample Dockerfile to be used with this tool could look like this:
+// FROM gcr.io/google_appengine/go-compat
+// ADD . /app
+// RUN GOPATH=/app/_gopath go build -tags appenginevm -o /app/_ah/exe
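+//
+// A typical invocation wraps the deployment command, for example (mirroring
+// the usage message below):
+//	aedeploy gcloud preview app deploy ./app.yaml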
+package main
+
+import (
+ "flag"
+ "fmt"
+ "go/build"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+)
+
+var (
+ skipFiles = map[string]bool{
+ ".git": true,
+ ".gitconfig": true,
+ ".hg": true,
+ ".travis.yml": true,
+ }
+
+ gopathCache = map[string]string{}
+)
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ fmt.Fprintf(os.Stderr, "\t%s gcloud --verbosity debug preview app deploy --version myversion ./app.yaml\tDeploy app to production\n", os.Args[0])
+}
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+ if flag.NArg() < 1 {
+ usage()
+ os.Exit(1)
+ }
+
+ if err := aedeploy(); err != nil {
+ fmt.Fprintf(os.Stderr, os.Args[0]+": Error: %v\n", err)
+ os.Exit(1)
+ }
+}
+
+func aedeploy() error {
+ tags := []string{"appenginevm"}
+ app, err := analyze(tags)
+ if err != nil {
+ return err
+ }
+
+ tmpDir, err := app.bundle()
+ if tmpDir != "" {
+ defer os.RemoveAll(tmpDir)
+ }
+ if err != nil {
+ return err
+ }
+
+ if err := os.Chdir(tmpDir); err != nil {
+ return fmt.Errorf("unable to chdir to %v: %v", tmpDir, err)
+ }
+ return deploy()
+}
+
+// deploy calls the provided command to deploy the app from the temporary directory.
+func deploy() error {
+ cmd := exec.Command(flag.Arg(0), flag.Args()[1:]...)
+ cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
+ if err := cmd.Run(); err != nil {
+ return fmt.Errorf("unable to run %q: %v", strings.Join(flag.Args(), " "), err)
+ }
+ return nil
+}
+
+type app struct {
+ appFiles []string
+ imports map[string]string
+}
+
+// analyze checks the app for building with the given build tags and returns
+// app files, and a map of full directory import names to original import names.
+func analyze(tags []string) (*app, error) {
+ ctxt := buildContext(tags)
+ appFiles, err := appFiles(ctxt)
+ if err != nil {
+ return nil, err
+ }
+ gopath := filepath.SplitList(ctxt.GOPATH)
+ im, err := imports(ctxt, ".", gopath)
+ return &app{
+ appFiles: appFiles,
+ imports: im,
+ }, err
+}
+
+// buildContext returns the context for building the source.
+func buildContext(tags []string) *build.Context {
+ return &build.Context{
+ GOARCH: "amd64",
+ GOOS: "linux",
+ GOROOT: build.Default.GOROOT,
+ GOPATH: build.Default.GOPATH,
+ Compiler: build.Default.Compiler,
+ BuildTags: append(build.Default.BuildTags, tags...),
+ }
+}
+
+// bundle bundles the app into a temporary directory.
+func (s *app) bundle() (tmpdir string, err error) {
+ workDir, err := ioutil.TempDir("", os.Args[0])
+ if err != nil {
+ return "", fmt.Errorf("unable to create tmpdir: %v", err)
+ }
+
+ for srcDir, importName := range s.imports {
+ dstDir := "_gopath/src/" + importName
+ if err := copyTree(workDir, dstDir, srcDir); err != nil {
+ return workDir, fmt.Errorf("unable to copy directory %v to %v: %v", srcDir, dstDir, err)
+ }
+ }
+ if err := copyTree(workDir, ".", "."); err != nil {
+ return workDir, fmt.Errorf("unable to copy root directory to /app: %v", err)
+ }
+ return workDir, nil
+}
+
+// imports returns a map of all import directories (recursively) used by the app.
+// The return value maps full directory names to original import names.
+func imports(ctxt *build.Context, srcDir string, gopath []string) (map[string]string, error) {
+ pkg, err := ctxt.ImportDir(srcDir, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ // Resolve all non-standard-library imports
+ result := make(map[string]string)
+ for _, v := range pkg.Imports {
+ if !strings.Contains(v, ".") {
+ continue
+ }
+ src, err := findInGopath(v, gopath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to find import %v in gopath %v: %v", v, gopath, err)
+ }
+ if _, ok := result[src]; ok { // Already processed
+ continue
+ }
+ result[src] = v
+ im, err := imports(ctxt, src, gopath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse package %v: %v", src, err)
+ }
+ for k, v := range im {
+ result[k] = v
+ }
+ }
+ return result, nil
+}
+
+// findInGopath searches the gopath for the named import directory.
+func findInGopath(dir string, gopath []string) (string, error) {
+ if v, ok := gopathCache[dir]; ok {
+ return v, nil
+ }
+ for _, v := range gopath {
+ dst := filepath.Join(v, "src", dir)
+ if _, err := os.Stat(dst); err == nil {
+ gopathCache[dir] = dst
+ return dst, nil
+ }
+ }
+ return "", fmt.Errorf("unable to find package %v in gopath %v", dir, gopath)
+}
+
+// copyTree copies srcDir to dstDir relative to dstRoot, ignoring skipFiles.
+func copyTree(dstRoot, dstDir, srcDir string) error {
+ d := filepath.Join(dstRoot, dstDir)
+ if err := os.MkdirAll(d, 0755); err != nil {
+ return fmt.Errorf("unable to create directory %v: %v", d, err)
+ }
+
+ entries, err := ioutil.ReadDir(srcDir)
+ if err != nil {
+ return fmt.Errorf("unable to read dir %v: %v", srcDir, err)
+ }
+ for _, entry := range entries {
+ n := entry.Name()
+ if skipFiles[n] {
+ continue
+ }
+ s := filepath.Join(srcDir, n)
+ d := filepath.Join(dstDir, n)
+ if entry.IsDir() {
+ if err := copyTree(dstRoot, d, s); err != nil {
+ return fmt.Errorf("unable to copy dir %v to %v: %v", s, d, err)
+ }
+ continue
+ }
+ if err := copyFile(dstRoot, d, s); err != nil {
+ return fmt.Errorf("unable to copy dir %v to %v: %v", s, d, err)
+ }
+ }
+ return nil
+}
+
+// copyFile copies src to dst relative to dstRoot.
+func copyFile(dstRoot, dst, src string) error {
+ s, err := os.Open(src)
+ if err != nil {
+ return fmt.Errorf("unable to open %v: %v", src, err)
+ }
+ defer s.Close()
+
+ dst = filepath.Join(dstRoot, dst)
+ d, err := os.Create(dst)
+ if err != nil {
+ return fmt.Errorf("unable to create %v: %v", dst)
+ }
+ _, err = io.Copy(d, s)
+ if err != nil {
+ d.Close() // ignore error, copy already failed.
+ return fmt.Errorf("unable to copy %v to %v: %v", src, dst, err)
+ }
+ if err := d.Close(); err != nil {
+ return fmt.Errorf("unable to close %v: %v", dst, err)
+ }
+ return nil
+}
+
+// appFiles returns a list of all Go source files in the app.
+func appFiles(ctxt *build.Context) ([]string, error) {
+ pkg, err := ctxt.ImportDir(".", 0)
+ if err != nil {
+ return nil, err
+ }
+ if !pkg.IsCommand() {
+ return nil, fmt.Errorf(`the root of your app needs to be package "main" (currently %q). Please see https://cloud.google.com/appengine/docs/go/managed-vms for more details on structuring your app.`, pkg.Name)
+ }
+ var appFiles []string
+ for _, f := range pkg.GoFiles {
+ n := filepath.Join(".", f)
+ appFiles = append(appFiles, n)
+ }
+ return appFiles, nil
+}
diff --git a/vendor/google.golang.org/appengine/datastore/datastore.go b/vendor/google.golang.org/appengine/datastore/datastore.go
new file mode 100644
index 000000000..9422e4174
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/datastore.go
@@ -0,0 +1,406 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var (
+ // ErrInvalidEntityType is returned when functions like Get or Next are
+ // passed a dst or src argument of invalid type.
+ ErrInvalidEntityType = errors.New("datastore: invalid entity type")
+ // ErrInvalidKey is returned when an invalid key is presented.
+ ErrInvalidKey = errors.New("datastore: invalid key")
+ // ErrNoSuchEntity is returned when no entity was found for a given key.
+ ErrNoSuchEntity = errors.New("datastore: no such entity")
+)
+
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct.
+// StructType is the type of the struct pointed to by the destination argument
+// passed to Get or to Iterator.Next.
+type ErrFieldMismatch struct {
+ StructType reflect.Type
+ FieldName string
+ Reason string
+}
+
+func (e *ErrFieldMismatch) Error() string {
+ return fmt.Sprintf("datastore: cannot load field %q into a %q: %s",
+ e.FieldName, e.StructType, e.Reason)
+}
+
+// protoToKey converts a Reference proto to a *Key.
+func protoToKey(r *pb.Reference) (k *Key, err error) {
+ appID := r.GetApp()
+ namespace := r.GetNameSpace()
+ for _, e := range r.Path.Element {
+ k = &Key{
+ kind: e.GetType(),
+ stringID: e.GetName(),
+ intID: e.GetId(),
+ parent: k,
+ appID: appID,
+ namespace: namespace,
+ }
+ if !k.valid() {
+ return nil, ErrInvalidKey
+ }
+ }
+ return
+}
+
+// keyToProto converts a *Key to a Reference proto.
+func keyToProto(defaultAppID string, k *Key) *pb.Reference {
+ appID := k.appID
+ if appID == "" {
+ appID = defaultAppID
+ }
+ n := 0
+ for i := k; i != nil; i = i.parent {
+ n++
+ }
+ e := make([]*pb.Path_Element, n)
+ for i := k; i != nil; i = i.parent {
+ n--
+ e[n] = &pb.Path_Element{
+ Type: &i.kind,
+ }
+ // At most one of {Name,Id} should be set.
+ // Neither will be set for incomplete keys.
+ if i.stringID != "" {
+ e[n].Name = &i.stringID
+ } else if i.intID != 0 {
+ e[n].Id = &i.intID
+ }
+ }
+ var namespace *string
+ if k.namespace != "" {
+ namespace = proto.String(k.namespace)
+ }
+ return &pb.Reference{
+ App: proto.String(appID),
+ NameSpace: namespace,
+ Path: &pb.Path{
+ Element: e,
+ },
+ }
+}
+
+// multiKeyToProto is a batch version of keyToProto.
+func multiKeyToProto(appID string, key []*Key) []*pb.Reference {
+ ret := make([]*pb.Reference, len(key))
+ for i, k := range key {
+ ret[i] = keyToProto(appID, k)
+ }
+ return ret
+}
+
+// multiValid is a batch version of Key.valid. It returns an error, not a
+// []bool.
+func multiValid(key []*Key) error {
+ invalid := false
+ for _, k := range key {
+ if !k.valid() {
+ invalid = true
+ break
+ }
+ }
+ if !invalid {
+ return nil
+ }
+ err := make(appengine.MultiError, len(key))
+ for i, k := range key {
+ if !k.valid() {
+ err[i] = ErrInvalidKey
+ }
+ }
+ return err
+}
+
+// It's unfortunate that the two semantically equivalent concepts pb.Reference
+// and pb.PropertyValue_ReferenceValue aren't the same type. For example, the
+// two have different protobuf field numbers.
+
+// referenceValueToKey is the same as protoToKey except the input is a
+// PropertyValue_ReferenceValue instead of a Reference.
+func referenceValueToKey(r *pb.PropertyValue_ReferenceValue) (k *Key, err error) {
+ appID := r.GetApp()
+ namespace := r.GetNameSpace()
+ for _, e := range r.Pathelement {
+ k = &Key{
+ kind: e.GetType(),
+ stringID: e.GetName(),
+ intID: e.GetId(),
+ parent: k,
+ appID: appID,
+ namespace: namespace,
+ }
+ if !k.valid() {
+ return nil, ErrInvalidKey
+ }
+ }
+ return
+}
+
+// keyToReferenceValue is the same as keyToProto except the output is a
+// PropertyValue_ReferenceValue instead of a Reference.
+func keyToReferenceValue(defaultAppID string, k *Key) *pb.PropertyValue_ReferenceValue {
+ ref := keyToProto(defaultAppID, k)
+ pe := make([]*pb.PropertyValue_ReferenceValue_PathElement, len(ref.Path.Element))
+ for i, e := range ref.Path.Element {
+ pe[i] = &pb.PropertyValue_ReferenceValue_PathElement{
+ Type: e.Type,
+ Id: e.Id,
+ Name: e.Name,
+ }
+ }
+ return &pb.PropertyValue_ReferenceValue{
+ App: ref.App,
+ NameSpace: ref.NameSpace,
+ Pathelement: pe,
+ }
+}
+
+type multiArgType int
+
+const (
+ multiArgTypeInvalid multiArgType = iota
+ multiArgTypePropertyLoadSaver
+ multiArgTypeStruct
+ multiArgTypeStructPtr
+ multiArgTypeInterface
+)
+
+// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct
+// type S, for some interface type I, or some non-interface non-pointer type P
+// such that P or *P implements PropertyLoadSaver.
+//
+// It returns what category the slice's elements are, and the reflect.Type
+// that represents S, I or P.
+//
+// As a special case, PropertyList is an invalid type for v.
+func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
+ if v.Kind() != reflect.Slice {
+ return multiArgTypeInvalid, nil
+ }
+ if v.Type() == typeOfPropertyList {
+ return multiArgTypeInvalid, nil
+ }
+ elemType = v.Type().Elem()
+ if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
+ return multiArgTypePropertyLoadSaver, elemType
+ }
+ switch elemType.Kind() {
+ case reflect.Struct:
+ return multiArgTypeStruct, elemType
+ case reflect.Interface:
+ return multiArgTypeInterface, elemType
+ case reflect.Ptr:
+ elemType = elemType.Elem()
+ if elemType.Kind() == reflect.Struct {
+ return multiArgTypeStructPtr, elemType
+ }
+ }
+ return multiArgTypeInvalid, nil
+}
+
+// Get loads the entity stored for k into dst, which must be a struct pointer
+// or implement PropertyLoadSaver. If there is no such entity for the key, Get
+// returns ErrNoSuchEntity.
+//
+// The values of dst's unmatched struct fields are not modified, and matching
+// slice-typed fields are not reset before appending to them. In particular, it
+// is recommended to pass a pointer to a zero valued struct on each Get call.
+//
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. ErrFieldMismatch is only returned if
+// dst is a struct pointer.
+func Get(c context.Context, key *Key, dst interface{}) error {
+ if dst == nil { // GetMulti catches nil interface; we need to catch nil ptr here
+ return ErrInvalidEntityType
+ }
+ err := GetMulti(c, []*Key{key}, []interface{}{dst})
+ if me, ok := err.(appengine.MultiError); ok {
+ return me[0]
+ }
+ return err
+}
+
+// GetMulti is a batch version of Get.
+//
+// dst must be a []S, []*S, []I or []P, for some struct type S, some interface
+// type I, or some non-interface non-pointer type P such that P or *P
+// implements PropertyLoadSaver. If an []I, each element must be a valid dst
+// for Get: it must be a struct pointer or implement PropertyLoadSaver.
+//
+// As a special case, PropertyList is an invalid type for dst, even though a
+// PropertyList is a slice of structs. It is treated as invalid to avoid being
+// mistakenly passed when []PropertyList was intended.
+func GetMulti(c context.Context, key []*Key, dst interface{}) error {
+ v := reflect.ValueOf(dst)
+ multiArgType, _ := checkMultiArg(v)
+ if multiArgType == multiArgTypeInvalid {
+ return errors.New("datastore: dst has invalid type")
+ }
+ if len(key) != v.Len() {
+ return errors.New("datastore: key and dst slices have different length")
+ }
+ if len(key) == 0 {
+ return nil
+ }
+ if err := multiValid(key); err != nil {
+ return err
+ }
+ req := &pb.GetRequest{
+ Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key),
+ }
+ res := &pb.GetResponse{}
+ if err := internal.Call(c, "datastore_v3", "Get", req, res); err != nil {
+ return err
+ }
+ if len(key) != len(res.Entity) {
+ return errors.New("datastore: internal error: server returned the wrong number of entities")
+ }
+ multiErr, any := make(appengine.MultiError, len(key)), false
+ for i, e := range res.Entity {
+ if e.Entity == nil {
+ multiErr[i] = ErrNoSuchEntity
+ } else {
+ elem := v.Index(i)
+ if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
+ elem = elem.Addr()
+ }
+ if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
+ elem.Set(reflect.New(elem.Type().Elem()))
+ }
+ multiErr[i] = loadEntity(elem.Interface(), e.Entity)
+ }
+ if multiErr[i] != nil {
+ any = true
+ }
+ }
+ if any {
+ return multiErr
+ }
+ return nil
+}
+
+// Put saves the entity src into the datastore with key k. src must be a struct
+// pointer or implement PropertyLoadSaver; if a struct pointer then any
+// unexported fields of that struct will be skipped. If k is an incomplete key,
+// the returned key will be a unique key generated by the datastore.
+func Put(c context.Context, key *Key, src interface{}) (*Key, error) {
+ k, err := PutMulti(c, []*Key{key}, []interface{}{src})
+ if err != nil {
+ if me, ok := err.(appengine.MultiError); ok {
+ return nil, me[0]
+ }
+ return nil, err
+ }
+ return k[0], nil
+}
+
+// PutMulti is a batch version of Put.
+//
+// src must satisfy the same conditions as the dst argument to GetMulti.
+func PutMulti(c context.Context, key []*Key, src interface{}) ([]*Key, error) {
+ v := reflect.ValueOf(src)
+ multiArgType, _ := checkMultiArg(v)
+ if multiArgType == multiArgTypeInvalid {
+ return nil, errors.New("datastore: src has invalid type")
+ }
+ if len(key) != v.Len() {
+ return nil, errors.New("datastore: key and src slices have different length")
+ }
+ if len(key) == 0 {
+ return nil, nil
+ }
+ appID := internal.FullyQualifiedAppID(c)
+ if err := multiValid(key); err != nil {
+ return nil, err
+ }
+ req := &pb.PutRequest{}
+ for i := range key {
+ elem := v.Index(i)
+ if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
+ elem = elem.Addr()
+ }
+ sProto, err := saveEntity(appID, key[i], elem.Interface())
+ if err != nil {
+ return nil, err
+ }
+ req.Entity = append(req.Entity, sProto)
+ }
+ res := &pb.PutResponse{}
+ if err := internal.Call(c, "datastore_v3", "Put", req, res); err != nil {
+ return nil, err
+ }
+ if len(key) != len(res.Key) {
+ return nil, errors.New("datastore: internal error: server returned the wrong number of keys")
+ }
+ ret := make([]*Key, len(key))
+ for i := range ret {
+ var err error
+ ret[i], err = protoToKey(res.Key[i])
+ if err != nil || ret[i].Incomplete() {
+ return nil, errors.New("datastore: internal error: server returned an invalid key")
+ }
+ }
+ return ret, nil
+}
+
+// Delete deletes the entity for the given key.
+func Delete(c context.Context, key *Key) error {
+ err := DeleteMulti(c, []*Key{key})
+ if me, ok := err.(appengine.MultiError); ok {
+ return me[0]
+ }
+ return err
+}
+
+// DeleteMulti is a batch version of Delete.
+func DeleteMulti(c context.Context, key []*Key) error {
+ if len(key) == 0 {
+ return nil
+ }
+ if err := multiValid(key); err != nil {
+ return err
+ }
+ req := &pb.DeleteRequest{
+ Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key),
+ }
+ res := &pb.DeleteResponse{}
+ return internal.Call(c, "datastore_v3", "Delete", req, res)
+}
+
+func namespaceMod(m proto.Message, namespace string) {
+ // pb.Query is the only type that has a name_space field.
+ // All other namespace support in datastore is in the keys.
+ switch m := m.(type) {
+ case *pb.Query:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ }
+}
+
+func init() {
+ internal.NamespaceMods["datastore_v3"] = namespaceMod
+ internal.RegisterErrorCodeMap("datastore_v3", pb.Error_ErrorCode_name)
+ internal.RegisterTimeoutErrorCode("datastore_v3", int32(pb.Error_TIMEOUT))
+}
diff --git a/vendor/google.golang.org/appengine/datastore/doc.go b/vendor/google.golang.org/appengine/datastore/doc.go
new file mode 100644
index 000000000..4ee7d477c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/doc.go
@@ -0,0 +1,316 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package datastore provides a client for App Engine's datastore service.
+
+
+Basic Operations
+
+Entities are the unit of storage and are associated with a key. A key
+consists of an optional parent key, a string application ID, a string kind
+(also known as an entity type), and either a StringID or an IntID. A
+StringID is also known as an entity name or key name.
+
+It is valid to create a key with a zero StringID and a zero IntID; this is
+called an incomplete key, and does not refer to any saved entity. Putting an
+entity into the datastore under an incomplete key will cause a unique key
+to be generated for that entity, with a non-zero IntID.
+
+An entity's contents are a mapping from case-sensitive field names to values.
+Valid value types are:
+ - signed integers (int, int8, int16, int32 and int64),
+ - bool,
+ - string,
+ - float32 and float64,
+ - []byte (up to 1 megabyte in length),
+ - any type whose underlying type is one of the above predeclared types,
+ - ByteString,
+ - *Key,
+ - time.Time (stored with microsecond precision),
+ - appengine.BlobKey,
+ - appengine.GeoPoint,
+ - structs whose fields are all valid value types,
+ - slices of any of the above.
+
+Slices of structs are valid, as are structs that contain slices. However, if
+one struct contains another, then at most one of those can be repeated. This
+disqualifies recursively defined struct types: any struct T that (directly or
+indirectly) contains a []T.
+
+The Get and Put functions load and save an entity's contents. An entity's
+contents are typically represented by a struct pointer.
+
+Example code:
+
+ type Entity struct {
+ Value string
+ }
+
+ func handle(w http.ResponseWriter, r *http.Request) {
+ c := appengine.NewContext(r)
+
+ k := datastore.NewKey(c, "Entity", "stringID", 0, nil)
+ e := new(Entity)
+ if err := datastore.Get(c, k, e); err != nil {
+ http.Error(w, err.Error(), 500)
+ return
+ }
+
+ old := e.Value
+ e.Value = r.URL.Path
+
+ if _, err := datastore.Put(c, k, e); err != nil {
+ http.Error(w, err.Error(), 500)
+ return
+ }
+
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ fmt.Fprintf(w, "old=%q\nnew=%q\n", old, e.Value)
+ }
+
+GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and
+Delete functions. They take a []*Key instead of a *Key, and may return an
+appengine.MultiError when encountering partial failure.
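+
+For instance, a partial failure from GetMulti can be inspected like this
+(Entity is the type from the example above; the keys slice is illustrative):
+
+	entities := make([]Entity, len(keys))
+	err := datastore.GetMulti(c, keys, entities)
+	if me, ok := err.(appengine.MultiError); ok {
+		for i := range me {
+			if me[i] == datastore.ErrNoSuchEntity {
+				// No entity was stored for keys[i].
+			}
+		}
+	}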
+
+
+Properties
+
+An entity's contents can be represented by a variety of types. These are
+typically struct pointers, but can also be any type that implements the
+PropertyLoadSaver interface. If using a struct pointer, you do not have to
+explicitly implement the PropertyLoadSaver interface; the datastore will
+automatically convert via reflection. If a struct pointer does implement that
+interface then those methods will be used in preference to the default
+behavior for struct pointers. Struct pointers are more strongly typed and are
+easier to use; PropertyLoadSavers are more flexible.
+
+The actual types passed do not have to match between Get and Put calls or even
+across different App Engine requests. It is valid to put a *PropertyList and
+get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1.
+Conceptually, any entity is saved as a sequence of properties, and is loaded
+into the destination value on a property-by-property basis. When loading into
+a struct pointer, an entity that cannot be completely represented (such as a
+missing field) will result in an ErrFieldMismatch error but it is up to the
+caller whether this error is fatal, recoverable or ignorable.
+
+By default, for struct pointers, all properties are potentially indexed, and
+the property name is the same as the field name (and hence must start with an
+upper case letter). Fields may have a `datastore:"name,options"` tag. The tag
+name is the property name, which must be one or more valid Go identifiers
+joined by ".", but may start with a lower case letter. An empty tag name means
+to just use the field name. A "-" tag name means that the datastore will
+ignore that field. If options is "noindex" then the field will not be indexed.
+If the options is "" then the comma may be omitted. There are no other
+recognized options.
+
+Fields (except for []byte) are indexed by default. Strings longer than 1500
+bytes cannot be indexed; fields used to store long strings should be
+tagged with "noindex". Similarly, ByteStrings longer than 1500 bytes cannot be
+indexed.
+
+Example code:
+
+ // A and B are renamed to a and b.
+ // A, C and J are not indexed.
+ // D's tag is equivalent to having no tag at all (E).
+ // I is ignored entirely by the datastore.
+ // J has tag information for both the datastore and json packages.
+ type TaggedStruct struct {
+ A int `datastore:"a,noindex"`
+ B int `datastore:"b"`
+ C int `datastore:",noindex"`
+ D int `datastore:""`
+ E int
+ I int `datastore:"-"`
+ J int `datastore:",noindex" json:"j"`
+ }
+
+
+Structured Properties
+
+If the struct pointed to contains other structs, then the nested or embedded
+structs are flattened. For example, given these definitions:
+
+ type Inner1 struct {
+ W int32
+ X string
+ }
+
+ type Inner2 struct {
+ Y float64
+ }
+
+ type Inner3 struct {
+ Z bool
+ }
+
+ type Outer struct {
+ A int16
+ I []Inner1
+ J Inner2
+ Inner3
+ }
+
+then an Outer's properties would be equivalent to those of:
+
+ type OuterEquivalent struct {
+ A int16
+ IDotW []int32 `datastore:"I.W"`
+ IDotX []string `datastore:"I.X"`
+ JDotY float64 `datastore:"J.Y"`
+ Z bool
+ }
+
+If Outer's embedded Inner3 field was tagged as `datastore:"Foo"` then the
+equivalent field would instead be: FooDotZ bool `datastore:"Foo.Z"`.
+
+If an outer struct is tagged "noindex" then all of its implicit flattened
+fields are effectively "noindex".
+
+
+The PropertyLoadSaver Interface
+
+An entity's contents can also be represented by any type that implements the
+PropertyLoadSaver interface. This type may be a struct pointer, but it does
+not have to be. The datastore package will call Load when getting the entity's
+contents, and Save when putting the entity's contents.
+Possible uses include deriving non-stored fields, verifying fields, or indexing
+a field only if its value is positive.
+
+Example code:
+
+ type CustomPropsExample struct {
+ I, J int
+ // Sum is not stored, but should always be equal to I + J.
+ Sum int `datastore:"-"`
+ }
+
+ func (x *CustomPropsExample) Load(c <-chan Property) error {
+ // Load I and J as usual.
+ if err := datastore.LoadStruct(x, c); err != nil {
+ return err
+ }
+ // Derive the Sum field.
+ x.Sum = x.I + x.J
+ return nil
+ }
+
+ func (x *CustomPropsExample) Save(c chan<- Property) error {
+ defer close(c)
+ // Validate the Sum field.
+ if x.Sum != x.I + x.J {
+ return errors.New("CustomPropsExample has inconsistent sum")
+ }
+ // Save I and J as usual. The code below is equivalent to calling
+ // "return datastore.SaveStruct(x, c)", but is done manually for
+ // demonstration purposes.
+ c <- datastore.Property{
+ Name: "I",
+ Value: int64(x.I),
+ }
+ c <- datastore.Property{
+ Name: "J",
+ Value: int64(x.J),
+ }
+ return nil
+ }
+
+The *PropertyList type implements PropertyLoadSaver, and can therefore hold an
+arbitrary entity's contents.
+
+
+Queries
+
+Queries retrieve entities based on their properties or key's ancestry. Running
+a query yields an iterator of results: either keys or (key, entity) pairs.
+Queries are re-usable and it is safe to call Query.Run from concurrent
+goroutines. Iterators are not safe for concurrent use.
+
+Queries are immutable, and are either created by calling NewQuery, or derived
+from an existing query by calling a method like Filter or Order that returns a
+new query value. A query is typically constructed by calling NewQuery followed
+by a chain of zero or more such methods. These methods are:
+ - Ancestor and Filter constrain the entities returned by running a query.
+ - Order affects the order in which they are returned.
+ - Project constrains the fields returned.
+ - Distinct de-duplicates projected entities.
+ - KeysOnly makes the iterator return only keys, not (key, entity) pairs.
+ - Start, End, Offset and Limit define which sub-sequence of matching entities
+ to return. Start and End take cursors, Offset and Limit take integers. Start
+ and Offset affect the first result, End and Limit affect the last result.
+ If both Start and Offset are set, then the offset is relative to Start.
+ If both End and Limit are set, then the earliest constraint wins. Limit is
+ relative to Start+Offset, not relative to End. As a special case, a
+ negative limit means unlimited.
+
+Example code:
+
+ type Widget struct {
+ Description string
+ Price int
+ }
+
+ func handle(w http.ResponseWriter, r *http.Request) {
+ c := appengine.NewContext(r)
+ q := datastore.NewQuery("Widget").
+ Filter("Price <", 1000).
+ Order("-Price")
+ b := new(bytes.Buffer)
+ for t := q.Run(c); ; {
+ var x Widget
+ key, err := t.Next(&x)
+ if err == datastore.Done {
+ break
+ }
+ if err != nil {
+ serveError(c, w, err)
+ return
+ }
+ fmt.Fprintf(b, "Key=%v\nWidget=%#v\n\n", key, x)
+ }
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ io.Copy(w, b)
+ }
+
+
+Transactions
+
+RunInTransaction runs a function in a transaction.
+
+Example code:
+
+ type Counter struct {
+ Count int
+ }
+
+ func inc(c appengine.Context, key *datastore.Key) (int, error) {
+ var x Counter
+ if err := datastore.Get(c, key, &x); err != nil && err != datastore.ErrNoSuchEntity {
+ return 0, err
+ }
+ x.Count++
+ if _, err := datastore.Put(c, key, &x); err != nil {
+ return 0, err
+ }
+ return x.Count, nil
+ }
+
+ func handle(w http.ResponseWriter, r *http.Request) {
+ c := appengine.NewContext(r)
+ var count int
+ err := datastore.RunInTransaction(c, func(c appengine.Context) error {
+ var err1 error
+ count, err1 = inc(c, datastore.NewKey(c, "Counter", "singleton", 0, nil))
+ return err1
+ }, nil)
+ if err != nil {
+ serveError(c, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ fmt.Fprintf(w, "Count=%d", count)
+ }
+*/
+package datastore
diff --git a/vendor/google.golang.org/appengine/datastore/key.go b/vendor/google.golang.org/appengine/datastore/key.go
new file mode 100644
index 000000000..ac1f00250
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/key.go
@@ -0,0 +1,309 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+// Key represents the datastore key for a stored entity, and is immutable.
+type Key struct {
+ kind string
+ stringID string
+ intID int64
+ parent *Key
+ appID string
+ namespace string
+}
+
+// Kind returns the key's kind (also known as entity type).
+func (k *Key) Kind() string {
+ return k.kind
+}
+
+// StringID returns the key's string ID (also known as an entity name or key
+// name), which may be "".
+func (k *Key) StringID() string {
+ return k.stringID
+}
+
+// IntID returns the key's integer ID, which may be 0.
+func (k *Key) IntID() int64 {
+ return k.intID
+}
+
+// Parent returns the key's parent key, which may be nil.
+func (k *Key) Parent() *Key {
+ return k.parent
+}
+
+// AppID returns the key's application ID.
+func (k *Key) AppID() string {
+ return k.appID
+}
+
+// Namespace returns the key's namespace.
+func (k *Key) Namespace() string {
+ return k.namespace
+}
+
+// Incomplete returns whether the key does not refer to a stored entity.
+// In particular, whether the key has a zero StringID and a zero IntID.
+func (k *Key) Incomplete() bool {
+ return k.stringID == "" && k.intID == 0
+}
+
+// valid returns whether the key is valid.
+func (k *Key) valid() bool {
+ if k == nil {
+ return false
+ }
+ for ; k != nil; k = k.parent {
+ if k.kind == "" || k.appID == "" {
+ return false
+ }
+ if k.stringID != "" && k.intID != 0 {
+ return false
+ }
+ if k.parent != nil {
+ if k.parent.Incomplete() {
+ return false
+ }
+ if k.parent.appID != k.appID || k.parent.namespace != k.namespace {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Equal returns whether two keys are equal.
+func (k *Key) Equal(o *Key) bool {
+ for k != nil && o != nil {
+ if k.kind != o.kind || k.stringID != o.stringID || k.intID != o.intID || k.appID != o.appID || k.namespace != o.namespace {
+ return false
+ }
+ k, o = k.parent, o.parent
+ }
+ return k == o
+}
+
+// root returns the furthest ancestor of a key, which may be itself.
+func (k *Key) root() *Key {
+ for k.parent != nil {
+ k = k.parent
+ }
+ return k
+}
+
+// marshal marshals the key's string representation to the buffer.
+func (k *Key) marshal(b *bytes.Buffer) {
+ if k.parent != nil {
+ k.parent.marshal(b)
+ }
+ b.WriteByte('/')
+ b.WriteString(k.kind)
+ b.WriteByte(',')
+ if k.stringID != "" {
+ b.WriteString(k.stringID)
+ } else {
+ b.WriteString(strconv.FormatInt(k.intID, 10))
+ }
+}
+
+// String returns a string representation of the key.
+func (k *Key) String() string {
+ if k == nil {
+ return ""
+ }
+ b := bytes.NewBuffer(make([]byte, 0, 512))
+ k.marshal(b)
+ return b.String()
+}
+
+type gobKey struct {
+ Kind string
+ StringID string
+ IntID int64
+ Parent *gobKey
+ AppID string
+ Namespace string
+}
+
+func keyToGobKey(k *Key) *gobKey {
+ if k == nil {
+ return nil
+ }
+ return &gobKey{
+ Kind: k.kind,
+ StringID: k.stringID,
+ IntID: k.intID,
+ Parent: keyToGobKey(k.parent),
+ AppID: k.appID,
+ Namespace: k.namespace,
+ }
+}
+
+func gobKeyToKey(gk *gobKey) *Key {
+ if gk == nil {
+ return nil
+ }
+ return &Key{
+ kind: gk.Kind,
+ stringID: gk.StringID,
+ intID: gk.IntID,
+ parent: gobKeyToKey(gk.Parent),
+ appID: gk.AppID,
+ namespace: gk.Namespace,
+ }
+}
+
+func (k *Key) GobEncode() ([]byte, error) {
+ buf := new(bytes.Buffer)
+ if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func (k *Key) GobDecode(buf []byte) error {
+ gk := new(gobKey)
+ if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil {
+ return err
+ }
+ *k = *gobKeyToKey(gk)
+ return nil
+}
+
+func (k *Key) MarshalJSON() ([]byte, error) {
+ return []byte(`"` + k.Encode() + `"`), nil
+}
+
+func (k *Key) UnmarshalJSON(buf []byte) error {
+ if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' {
+ return errors.New("datastore: bad JSON key")
+ }
+ k2, err := DecodeKey(string(buf[1 : len(buf)-1]))
+ if err != nil {
+ return err
+ }
+ *k = *k2
+ return nil
+}
+
+// Encode returns an opaque representation of the key
+// suitable for use in HTML and URLs.
+// This is compatible with the Python and Java runtimes.
+func (k *Key) Encode() string {
+ ref := keyToProto("", k)
+
+ b, err := proto.Marshal(ref)
+ if err != nil {
+ panic(err)
+ }
+
+ // Trailing padding is stripped.
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// DecodeKey decodes a key from the opaque representation returned by Encode.
+func DecodeKey(encoded string) (*Key, error) {
+ // Re-add padding.
+ if m := len(encoded) % 4; m != 0 {
+ encoded += strings.Repeat("=", 4-m)
+ }
+
+ b, err := base64.URLEncoding.DecodeString(encoded)
+ if err != nil {
+ return nil, err
+ }
+
+ ref := new(pb.Reference)
+ if err := proto.Unmarshal(b, ref); err != nil {
+ return nil, err
+ }
+
+ return protoToKey(ref)
+}
+
+// NewIncompleteKey creates a new incomplete key.
+// kind cannot be empty.
+func NewIncompleteKey(c context.Context, kind string, parent *Key) *Key {
+ return NewKey(c, kind, "", 0, parent)
+}
+
+// NewKey creates a new key.
+// kind cannot be empty.
+// Either one or both of stringID and intID must be zero. If both are zero,
+// the key returned is incomplete.
+// parent must either be a complete key or nil.
+func NewKey(c context.Context, kind, stringID string, intID int64, parent *Key) *Key {
+ // If there's a parent key, use its namespace.
+ // Otherwise, use any namespace attached to the context.
+ var namespace string
+ if parent != nil {
+ namespace = parent.namespace
+ } else {
+ namespace = internal.NamespaceFromContext(c)
+ }
+
+ return &Key{
+ kind: kind,
+ stringID: stringID,
+ intID: intID,
+ parent: parent,
+ appID: internal.FullyQualifiedAppID(c),
+ namespace: namespace,
+ }
+}
+
+// AllocateIDs returns a range of n integer IDs with the given kind and parent
+// combination. kind cannot be empty; parent may be nil. The IDs in the range
+// returned will not be used by the datastore's automatic ID sequence generator
+// and may be used with NewKey without conflict.
+//
+// The range is inclusive at the low end and exclusive at the high end. In
+// other words, valid intIDs x satisfy low <= x && x < high.
+//
+// If no error is returned, low + n == high.
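+//
+// For example, the sketch below (the kind name is illustrative) reserves ten
+// IDs; each x in [low, high) may then be used as the intID in
+// NewKey(c, "Entity", "", x, nil):
+//	low, high, err := datastore.AllocateIDs(c, "Entity", nil, 10)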
+func AllocateIDs(c context.Context, kind string, parent *Key, n int) (low, high int64, err error) {
+ if kind == "" {
+ return 0, 0, errors.New("datastore: AllocateIDs given an empty kind")
+ }
+ if n < 0 {
+ return 0, 0, fmt.Errorf("datastore: AllocateIDs given a negative count: %d", n)
+ }
+ if n == 0 {
+ return 0, 0, nil
+ }
+ req := &pb.AllocateIdsRequest{
+ ModelKey: keyToProto("", NewIncompleteKey(c, kind, parent)),
+ Size: proto.Int64(int64(n)),
+ }
+ res := &pb.AllocateIdsResponse{}
+ if err := internal.Call(c, "datastore_v3", "AllocateIds", req, res); err != nil {
+ return 0, 0, err
+ }
+ // The protobuf is inclusive at both ends. Idiomatic Go (e.g. slices, for loops)
+ // is inclusive at the low end and exclusive at the high end, so we add 1.
+ low = res.GetStart()
+ high = res.GetEnd() + 1
+ if low+int64(n) != high {
+ return 0, 0, fmt.Errorf("datastore: internal error: could not allocate %d IDs", n)
+ }
+ return low, high, nil
+}
diff --git a/vendor/google.golang.org/appengine/datastore/load.go b/vendor/google.golang.org/appengine/datastore/load.go
new file mode 100644
index 000000000..3f3c80c36
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/load.go
@@ -0,0 +1,334 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+
+ "google.golang.org/appengine"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var (
+ typeOfBlobKey = reflect.TypeOf(appengine.BlobKey(""))
+ typeOfByteSlice = reflect.TypeOf([]byte(nil))
+ typeOfByteString = reflect.TypeOf(ByteString(nil))
+ typeOfGeoPoint = reflect.TypeOf(appengine.GeoPoint{})
+ typeOfTime = reflect.TypeOf(time.Time{})
+)
+
+// typeMismatchReason returns a string explaining why the property p could not
+// be stored in an entity field of type v.Type().
+func typeMismatchReason(p Property, v reflect.Value) string {
+ entityType := "empty"
+ switch p.Value.(type) {
+ case int64:
+ entityType = "int"
+ case bool:
+ entityType = "bool"
+ case string:
+ entityType = "string"
+ case float64:
+ entityType = "float"
+ case *Key:
+ entityType = "*datastore.Key"
+ case time.Time:
+ entityType = "time.Time"
+ case appengine.BlobKey:
+ entityType = "appengine.BlobKey"
+ case appengine.GeoPoint:
+ entityType = "appengine.GeoPoint"
+ case ByteString:
+ entityType = "datastore.ByteString"
+ case []byte:
+ entityType = "[]byte"
+ }
+ return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type())
+}
+
+type propertyLoader struct {
+ // m holds the number of times a substruct field like "Foo.Bar.Baz" has
+ // been seen so far. The map is constructed lazily.
+ m map[string]int
+}
+
+func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, requireSlice bool) string {
+ var v reflect.Value
+ // Traverse a struct's struct-typed fields.
+ for name := p.Name; ; {
+ decoder, ok := codec.byName[name]
+ if !ok {
+ return "no such struct field"
+ }
+ v = structValue.Field(decoder.index)
+ if !v.IsValid() {
+ return "no such struct field"
+ }
+ if !v.CanSet() {
+ return "cannot set struct field"
+ }
+
+ if decoder.substructCodec == nil {
+ break
+ }
+
+ if v.Kind() == reflect.Slice {
+ if l.m == nil {
+ l.m = make(map[string]int)
+ }
+ index := l.m[p.Name]
+ l.m[p.Name] = index + 1
+ for v.Len() <= index {
+ v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem()))
+ }
+ structValue = v.Index(index)
+ requireSlice = false
+ } else {
+ structValue = v
+ }
+ // Strip the "I." from "I.X".
+ name = name[len(codec.byIndex[decoder.index].name):]
+ codec = decoder.substructCodec
+ }
+
+ var slice reflect.Value
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+ slice = v
+ v = reflect.New(v.Type().Elem()).Elem()
+ } else if requireSlice {
+ return "multiple-valued property requires a slice field type"
+ }
+
+ // Convert indexValues to a Go value with a meaning derived from the
+ // destination type.
+ pValue := p.Value
+ if iv, ok := pValue.(indexValue); ok {
+ meaning := pb.Property_NO_MEANING
+ switch v.Type() {
+ case typeOfBlobKey:
+ meaning = pb.Property_BLOBKEY
+ case typeOfByteSlice:
+ meaning = pb.Property_BLOB
+ case typeOfByteString:
+ meaning = pb.Property_BYTESTRING
+ case typeOfGeoPoint:
+ meaning = pb.Property_GEORSS_POINT
+ case typeOfTime:
+ meaning = pb.Property_GD_WHEN
+ }
+ var err error
+ pValue, err = propValue(iv.value, meaning)
+ if err != nil {
+ return err.Error()
+ }
+ }
+
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x, ok := pValue.(int64)
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ if v.OverflowInt(x) {
+ return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
+ }
+ v.SetInt(x)
+ case reflect.Bool:
+ x, ok := pValue.(bool)
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ v.SetBool(x)
+ case reflect.String:
+ switch x := pValue.(type) {
+ case appengine.BlobKey:
+ v.SetString(string(x))
+ case ByteString:
+ v.SetString(string(x))
+ case string:
+ v.SetString(x)
+ default:
+ if pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ }
+ case reflect.Float32, reflect.Float64:
+ x, ok := pValue.(float64)
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ if v.OverflowFloat(x) {
+ return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
+ }
+ v.SetFloat(x)
+ case reflect.Ptr:
+ x, ok := pValue.(*Key)
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ if _, ok := v.Interface().(*Key); !ok {
+ return typeMismatchReason(p, v)
+ }
+ v.Set(reflect.ValueOf(x))
+ case reflect.Struct:
+ switch v.Type() {
+ case typeOfTime:
+ x, ok := pValue.(time.Time)
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ v.Set(reflect.ValueOf(x))
+ case typeOfGeoPoint:
+ x, ok := pValue.(appengine.GeoPoint)
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ v.Set(reflect.ValueOf(x))
+ default:
+ return typeMismatchReason(p, v)
+ }
+ case reflect.Slice:
+ x, ok := pValue.([]byte)
+ if !ok {
+ if y, yok := pValue.(ByteString); yok {
+ x, ok = []byte(y), true
+ }
+ }
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ if v.Type().Elem().Kind() != reflect.Uint8 {
+ return typeMismatchReason(p, v)
+ }
+ v.SetBytes(x)
+ default:
+ return typeMismatchReason(p, v)
+ }
+ if slice.IsValid() {
+ slice.Set(reflect.Append(slice, v))
+ }
+ return ""
+}
+
+// loadEntity loads an EntityProto into PropertyLoadSaver or struct pointer.
+func loadEntity(dst interface{}, src *pb.EntityProto) (err error) {
+ props, err := protoToProperties(src)
+ if err != nil {
+ return err
+ }
+ if e, ok := dst.(PropertyLoadSaver); ok {
+ return e.Load(props)
+ }
+ return LoadStruct(dst, props)
+}
+
+func (s structPLS) Load(props []Property) error {
+ var fieldName, reason string
+ var l propertyLoader
+ for _, p := range props {
+ if errStr := l.load(s.codec, s.v, p, p.Multiple); errStr != "" {
+ // We don't return early, as we try to load as many properties as possible.
+ // It is valid to load an entity into a struct that cannot fully represent it.
+ // That case returns an error, but the caller is free to ignore it.
+ fieldName, reason = p.Name, errStr
+ }
+ }
+ if reason != "" {
+ return &ErrFieldMismatch{
+ StructType: s.v.Type(),
+ FieldName: fieldName,
+ Reason: reason,
+ }
+ }
+ return nil
+}
+
+func protoToProperties(src *pb.EntityProto) ([]Property, error) {
+ props, rawProps := src.Property, src.RawProperty
+ out := make([]Property, 0, len(props)+len(rawProps))
+ for {
+ var (
+ x *pb.Property
+ noIndex bool
+ )
+ if len(props) > 0 {
+ x, props = props[0], props[1:]
+ } else if len(rawProps) > 0 {
+ x, rawProps = rawProps[0], rawProps[1:]
+ noIndex = true
+ } else {
+ break
+ }
+
+ var value interface{}
+ if x.Meaning != nil && *x.Meaning == pb.Property_INDEX_VALUE {
+ value = indexValue{x.Value}
+ } else {
+ var err error
+ value, err = propValue(x.Value, x.GetMeaning())
+ if err != nil {
+ return nil, err
+ }
+ }
+ out = append(out, Property{
+ Name: x.GetName(),
+ Value: value,
+ NoIndex: noIndex,
+ Multiple: x.GetMultiple(),
+ })
+ }
+ return out, nil
+}
+
+// propValue returns a Go value that combines the raw PropertyValue with a
+// meaning. For example, an Int64Value with GD_WHEN becomes a time.Time.
+func propValue(v *pb.PropertyValue, m pb.Property_Meaning) (interface{}, error) {
+ switch {
+ case v.Int64Value != nil:
+ if m == pb.Property_GD_WHEN {
+ return fromUnixMicro(*v.Int64Value), nil
+ } else {
+ return *v.Int64Value, nil
+ }
+ case v.BooleanValue != nil:
+ return *v.BooleanValue, nil
+ case v.StringValue != nil:
+ if m == pb.Property_BLOB {
+ return []byte(*v.StringValue), nil
+ } else if m == pb.Property_BLOBKEY {
+ return appengine.BlobKey(*v.StringValue), nil
+ } else if m == pb.Property_BYTESTRING {
+ return ByteString(*v.StringValue), nil
+ } else {
+ return *v.StringValue, nil
+ }
+ case v.DoubleValue != nil:
+ return *v.DoubleValue, nil
+ case v.Referencevalue != nil:
+ key, err := referenceValueToKey(v.Referencevalue)
+ if err != nil {
+ return nil, err
+ }
+ return key, nil
+ case v.Pointvalue != nil:
+ // NOTE: Strangely, latitude maps to X, longitude to Y.
+ return appengine.GeoPoint{Lat: v.Pointvalue.GetX(), Lng: v.Pointvalue.GetY()}, nil
+ }
+ return nil, nil
+}
+
+// indexValue is a Property value that is created when entities are loaded from
+// an index, such as from a projection query.
+//
+// Such Property values do not contain all of the metadata required to be
+// faithfully represented as a Go value, and are instead represented as an
+// opaque indexValue. Load the properties into a concrete struct type (e.g. by
+// passing a struct pointer to Iterator.Next) to reconstruct actual Go values
+// of type int, string, time.Time, etc.
+type indexValue struct {
+ value *pb.PropertyValue
+}
diff --git a/vendor/google.golang.org/appengine/datastore/prop.go b/vendor/google.golang.org/appengine/datastore/prop.go
new file mode 100644
index 000000000..3caef9a3b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/prop.go
@@ -0,0 +1,294 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "unicode"
+)
+
+// Entities with more than this many indexed properties will not be saved.
+const maxIndexedProperties = 20000
+
+// []byte fields more than 1 megabyte long will not be loaded or saved.
+const maxBlobLen = 1 << 20
+
+// Property is a name/value pair plus some metadata. A datastore entity's
+// contents are loaded and saved as a sequence of Properties. An entity can
+// have multiple Properties with the same name, provided that p.Multiple is
+// true on all of that entity's Properties with that name.
+type Property struct {
+ // Name is the property name.
+ Name string
+ // Value is the property value. The valid types are:
+ // - int64
+ // - bool
+ // - string
+ // - float64
+ // - ByteString
+ // - *Key
+ // - time.Time
+ // - appengine.BlobKey
+ // - appengine.GeoPoint
+ // - []byte (up to 1 megabyte in length)
+ // This set is smaller than the set of valid struct field types that the
+ // datastore can load and save. A Property Value cannot be a slice (apart
+ // from []byte); use multiple Properties instead. Also, a Value's type
+ // must be explicitly on the list above; it is not sufficient for the
+ // underlying type to be on that list. For example, a Value of "type
+ // myInt64 int64" is invalid. Smaller-width integers and floats are also
+ // invalid. Again, this is more restrictive than the set of valid struct
+ // field types.
+ //
+ // A Value will have an opaque type when loading entities from an index,
+ // such as via a projection query. Load entities into a struct instead
+ // of a PropertyLoadSaver when using a projection query.
+ //
+ // A Value may also be the nil interface value; this is equivalent to
+ // Python's None but not directly representable by a Go struct. Loading
+ // a nil-valued property into a struct will set that field to the zero
+ // value.
+ Value interface{}
+ // NoIndex is whether the datastore cannot index this property.
+ NoIndex bool
+ // Multiple is whether the entity can have multiple properties with
+ // the same name. Even if a particular instance only has one property with
+ // a certain name, Multiple should be true if a struct would best represent
+ // it as a field of type []T instead of type T.
+ Multiple bool
+}
+
+// ByteString is a short byte slice (up to 1500 bytes) that can be indexed.
+type ByteString []byte
+
+// PropertyLoadSaver can be converted from and to a slice of Properties.
+type PropertyLoadSaver interface {
+ Load([]Property) error
+ Save() ([]Property, error)
+}
+
+// PropertyList converts a []Property to implement PropertyLoadSaver.
+type PropertyList []Property
+
+var (
+ typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem()
+ typeOfPropertyList = reflect.TypeOf(PropertyList(nil))
+)
+
+// Load loads all of the provided properties into l.
+// It does not first reset *l to an empty slice.
+func (l *PropertyList) Load(p []Property) error {
+ *l = append(*l, p...)
+ return nil
+}
+
+// Save saves all of l's properties as a slice of Properties.
+func (l *PropertyList) Save() ([]Property, error) {
+ return *l, nil
+}
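Editor's note: the PropertyLoadSaver interface above is the hook for custom (de)serialization, and PropertyList is its trivial implementation. Below is a minimal sketch of a custom implementation that stamps a field at save time; the Record type and its fields are invented for illustration, and the sketch assumes the LoadStruct and SaveStruct helpers defined later in this file.

    package example // illustration only; not part of the vendored package

    import (
        "time"

        "google.golang.org/appengine/datastore"
    )

    // Record stamps an update time on every save.
    type Record struct {
        Title     string
        UpdatedAt time.Time
    }

    func (r *Record) Load(props []datastore.Property) error {
        return datastore.LoadStruct(r, props) // fall back to the default struct loader
    }

    func (r *Record) Save() ([]datastore.Property, error) {
        r.UpdatedAt = time.Now() // derived value computed at save time
        return datastore.SaveStruct(r)
    }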
+
+// validPropertyName returns whether name consists of one or more valid Go
+// identifiers joined by ".".
+func validPropertyName(name string) bool {
+ if name == "" {
+ return false
+ }
+ for _, s := range strings.Split(name, ".") {
+ if s == "" {
+ return false
+ }
+ first := true
+ for _, c := range s {
+ if first {
+ first = false
+ if c != '_' && !unicode.IsLetter(c) {
+ return false
+ }
+ } else {
+ if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ }
+ return true
+}
+
+// structTag is the parsed `datastore:"name,options"` tag of a struct field.
+// If a field has no tag, or the tag has an empty name, then the structTag's
+// name is just the field name. A "-" name means that the datastore ignores
+// that field.
+type structTag struct {
+ name string
+ noIndex bool
+}
+
+// structCodec describes how to convert a struct to and from a sequence of
+// properties.
+type structCodec struct {
+ // byIndex gives the structTag for the i'th field.
+ byIndex []structTag
+ // byName gives the field codec for the structTag with the given name.
+ byName map[string]fieldCodec
+ // hasSlice is whether a struct or any of its nested or embedded structs
+ // has a slice-typed field (other than []byte).
+ hasSlice bool
+ // complete is whether the structCodec is complete. An incomplete
+ // structCodec may be encountered when walking a recursive struct.
+ complete bool
+}
+
+// fieldCodec is a struct field's index and, if that struct field's type is
+// itself a struct, that substruct's structCodec.
+type fieldCodec struct {
+ index int
+ substructCodec *structCodec
+}
+
+// structCodecs collects the structCodecs that have already been calculated.
+var (
+ structCodecsMutex sync.Mutex
+ structCodecs = make(map[reflect.Type]*structCodec)
+)
+
+// getStructCodec returns the structCodec for the given struct type.
+func getStructCodec(t reflect.Type) (*structCodec, error) {
+ structCodecsMutex.Lock()
+ defer structCodecsMutex.Unlock()
+ return getStructCodecLocked(t)
+}
+
+// getStructCodecLocked implements getStructCodec. The structCodecsMutex must
+// be held when calling this function.
+func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) {
+ c, ok := structCodecs[t]
+ if ok {
+ return c, nil
+ }
+ c = &structCodec{
+ byIndex: make([]structTag, t.NumField()),
+ byName: make(map[string]fieldCodec),
+ }
+
+ // Add c to the structCodecs map before we are sure it is good. If t is
+ // a recursive type, it needs to find the incomplete entry for itself in
+ // the map.
+ structCodecs[t] = c
+ defer func() {
+ if retErr != nil {
+ delete(structCodecs, t)
+ }
+ }()
+
+ for i := range c.byIndex {
+ f := t.Field(i)
+ name, opts := f.Tag.Get("datastore"), ""
+ if i := strings.Index(name, ","); i != -1 {
+ name, opts = name[:i], name[i+1:]
+ }
+ if name == "" {
+ if !f.Anonymous {
+ name = f.Name
+ }
+ } else if name == "-" {
+ c.byIndex[i] = structTag{name: name}
+ continue
+ } else if !validPropertyName(name) {
+ return nil, fmt.Errorf("datastore: struct tag has invalid property name: %q", name)
+ }
+
+ substructType, fIsSlice := reflect.Type(nil), false
+ switch f.Type.Kind() {
+ case reflect.Struct:
+ substructType = f.Type
+ case reflect.Slice:
+ if f.Type.Elem().Kind() == reflect.Struct {
+ substructType = f.Type.Elem()
+ }
+ fIsSlice = f.Type != typeOfByteSlice
+ c.hasSlice = c.hasSlice || fIsSlice
+ }
+
+ if substructType != nil && substructType != typeOfTime && substructType != typeOfGeoPoint {
+ if name != "" {
+ name = name + "."
+ }
+ sub, err := getStructCodecLocked(substructType)
+ if err != nil {
+ return nil, err
+ }
+ if !sub.complete {
+ return nil, fmt.Errorf("datastore: recursive struct: field %q", f.Name)
+ }
+ if fIsSlice && sub.hasSlice {
+ return nil, fmt.Errorf(
+ "datastore: flattening nested structs leads to a slice of slices: field %q", f.Name)
+ }
+ c.hasSlice = c.hasSlice || sub.hasSlice
+ for relName := range sub.byName {
+ absName := name + relName
+ if _, ok := c.byName[absName]; ok {
+ return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", absName)
+ }
+ c.byName[absName] = fieldCodec{index: i, substructCodec: sub}
+ }
+ } else {
+ if _, ok := c.byName[name]; ok {
+ return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", name)
+ }
+ c.byName[name] = fieldCodec{index: i}
+ }
+
+ c.byIndex[i] = structTag{
+ name: name,
+ noIndex: opts == "noindex",
+ }
+ }
+ c.complete = true
+ return c, nil
+}
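Editor's note: to make the tag handling above concrete, here is a hedged sketch of how `datastore:"name,options"` tags and nested structs map to property names; the Person and Address types are invented. The flattened "Home.City"-style names are what the loader's "Strip the I. from I.X" step undoes.

    package example // illustration only; types are hypothetical

    import "time"

    type Address struct {
        City string
        Zip  string `datastore:",noindex"` // keeps the name "Zip", skips indexing
    }

    type Person struct {
        Name   string    `datastore:"name"` // stored under the property name "name"
        Secret string    `datastore:"-"`    // ignored by the datastore
        Born   time.Time                    // default name "Born"
        Home   Address                      // flattened to "Home.City" and "Home.Zip"
        Nick   []string                     // repeated property "Nick", Multiple == true
    }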
+
+// structPLS adapts a struct to be a PropertyLoadSaver.
+type structPLS struct {
+ v reflect.Value
+ codec *structCodec
+}
+
+// newStructPLS returns a PropertyLoadSaver for the struct pointer p.
+func newStructPLS(p interface{}) (PropertyLoadSaver, error) {
+ v := reflect.ValueOf(p)
+ if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
+ return nil, ErrInvalidEntityType
+ }
+ v = v.Elem()
+ codec, err := getStructCodec(v.Type())
+ if err != nil {
+ return nil, err
+ }
+ return structPLS{v, codec}, nil
+}
+
+// LoadStruct loads the properties from p to dst.
+// dst must be a struct pointer.
+func LoadStruct(dst interface{}, p []Property) error {
+ x, err := newStructPLS(dst)
+ if err != nil {
+ return err
+ }
+ return x.Load(p)
+}
+
+// SaveStruct returns the properties from src as a slice of Properties.
+// src must be a struct pointer.
+func SaveStruct(src interface{}) ([]Property, error) {
+ x, err := newStructPLS(src)
+ if err != nil {
+ return nil, err
+ }
+ return x.Save()
+}
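Editor's note: LoadStruct and SaveStruct are also usable on their own as a pure in-memory conversion between a struct and []Property, which can be handy in tests. A small sketch; the Todo type is hypothetical.

    package example // illustration only

    import "google.golang.org/appengine/datastore"

    type Todo struct {
        Title string
        Done  bool
    }

    // roundTrip converts a struct to properties and back without touching the datastore.
    func roundTrip(src *Todo) (*Todo, error) {
        props, err := datastore.SaveStruct(src)
        if err != nil {
            return nil, err
        }
        dst := new(Todo)
        if err := datastore.LoadStruct(dst, props); err != nil {
            return nil, err
        }
        return dst, nil
    }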
diff --git a/vendor/google.golang.org/appengine/datastore/query.go b/vendor/google.golang.org/appengine/datastore/query.go
new file mode 100644
index 000000000..696a5d3e2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/query.go
@@ -0,0 +1,713 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+type operator int
+
+const (
+ lessThan operator = iota
+ lessEq
+ equal
+ greaterEq
+ greaterThan
+)
+
+var operatorToProto = map[operator]*pb.Query_Filter_Operator{
+ lessThan: pb.Query_Filter_LESS_THAN.Enum(),
+ lessEq: pb.Query_Filter_LESS_THAN_OR_EQUAL.Enum(),
+ equal: pb.Query_Filter_EQUAL.Enum(),
+ greaterEq: pb.Query_Filter_GREATER_THAN_OR_EQUAL.Enum(),
+ greaterThan: pb.Query_Filter_GREATER_THAN.Enum(),
+}
+
+// filter is a conditional filter on query results.
+type filter struct {
+ FieldName string
+ Op operator
+ Value interface{}
+}
+
+type sortDirection int
+
+const (
+ ascending sortDirection = iota
+ descending
+)
+
+var sortDirectionToProto = map[sortDirection]*pb.Query_Order_Direction{
+ ascending: pb.Query_Order_ASCENDING.Enum(),
+ descending: pb.Query_Order_DESCENDING.Enum(),
+}
+
+// order is a sort order on query results.
+type order struct {
+ FieldName string
+ Direction sortDirection
+}
+
+// NewQuery creates a new Query for a specific entity kind.
+//
+// An empty kind means to return all entities, including entities created and
+// managed by other App Engine features, and is called a kindless query.
+// Kindless queries cannot include filters or sort orders on property values.
+func NewQuery(kind string) *Query {
+ return &Query{
+ kind: kind,
+ limit: -1,
+ }
+}
+
+// Query represents a datastore query.
+type Query struct {
+ kind string
+ ancestor *Key
+ filter []filter
+ order []order
+ projection []string
+
+ distinct bool
+ keysOnly bool
+ eventual bool
+ limit int32
+ offset int32
+ start *pb.CompiledCursor
+ end *pb.CompiledCursor
+
+ err error
+}
+
+func (q *Query) clone() *Query {
+ x := *q
+ // Copy the contents of the slice-typed fields to a new backing store.
+ if len(q.filter) > 0 {
+ x.filter = make([]filter, len(q.filter))
+ copy(x.filter, q.filter)
+ }
+ if len(q.order) > 0 {
+ x.order = make([]order, len(q.order))
+ copy(x.order, q.order)
+ }
+ return &x
+}
+
+// Ancestor returns a derivative query with an ancestor filter.
+// The ancestor should not be nil.
+func (q *Query) Ancestor(ancestor *Key) *Query {
+ q = q.clone()
+ if ancestor == nil {
+ q.err = errors.New("datastore: nil query ancestor")
+ return q
+ }
+ q.ancestor = ancestor
+ return q
+}
+
+// EventualConsistency returns a derivative query that returns eventually
+// consistent results.
+// It only has an effect on ancestor queries.
+func (q *Query) EventualConsistency() *Query {
+ q = q.clone()
+ q.eventual = true
+ return q
+}
+
+// Filter returns a derivative query with a field-based filter.
+// The filterStr argument must be a field name followed by optional space,
+// followed by an operator, one of ">", "<", ">=", "<=", or "=".
+// Fields are compared against the provided value using the operator.
+// Multiple filters are AND'ed together.
+func (q *Query) Filter(filterStr string, value interface{}) *Query {
+ q = q.clone()
+ filterStr = strings.TrimSpace(filterStr)
+ if len(filterStr) < 1 {
+ q.err = errors.New("datastore: invalid filter: " + filterStr)
+ return q
+ }
+ f := filter{
+ FieldName: strings.TrimRight(filterStr, " ><=!"),
+ Value: value,
+ }
+ switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op {
+ case "<=":
+ f.Op = lessEq
+ case ">=":
+ f.Op = greaterEq
+ case "<":
+ f.Op = lessThan
+ case ">":
+ f.Op = greaterThan
+ case "=":
+ f.Op = equal
+ default:
+ q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr)
+ return q
+ }
+ q.filter = append(q.filter, f)
+ return q
+}
+
+// Order returns a derivative query with a field-based sort order. Orders are
+// applied in the order they are added. The default order is ascending; to sort
+// in descending order prefix the fieldName with a minus sign (-).
+func (q *Query) Order(fieldName string) *Query {
+ q = q.clone()
+ fieldName = strings.TrimSpace(fieldName)
+ o := order{
+ Direction: ascending,
+ FieldName: fieldName,
+ }
+ if strings.HasPrefix(fieldName, "-") {
+ o.Direction = descending
+ o.FieldName = strings.TrimSpace(fieldName[1:])
+ } else if strings.HasPrefix(fieldName, "+") {
+ q.err = fmt.Errorf("datastore: invalid order: %q", fieldName)
+ return q
+ }
+ if len(o.FieldName) == 0 {
+ q.err = errors.New("datastore: empty order")
+ return q
+ }
+ q.order = append(q.order, o)
+ return q
+}
+
+// Project returns a derivative query that yields only the given fields. It
+// cannot be used with KeysOnly.
+func (q *Query) Project(fieldNames ...string) *Query {
+ q = q.clone()
+ q.projection = append([]string(nil), fieldNames...)
+ return q
+}
+
+// Distinct returns a derivative query that yields de-duplicated entities with
+// respect to the set of projected fields. It is only used for projection
+// queries.
+func (q *Query) Distinct() *Query {
+ q = q.clone()
+ q.distinct = true
+ return q
+}
+
+// KeysOnly returns a derivative query that yields only keys, not keys and
+// entities. It cannot be used with projection queries.
+func (q *Query) KeysOnly() *Query {
+ q = q.clone()
+ q.keysOnly = true
+ return q
+}
+
+// Limit returns a derivative query that has a limit on the number of results
+// returned. A negative value means unlimited.
+func (q *Query) Limit(limit int) *Query {
+ q = q.clone()
+ if limit < math.MinInt32 || limit > math.MaxInt32 {
+ q.err = errors.New("datastore: query limit overflow")
+ return q
+ }
+ q.limit = int32(limit)
+ return q
+}
+
+// Offset returns a derivative query that has an offset of how many keys to
+// skip over before returning results. A negative value is invalid.
+func (q *Query) Offset(offset int) *Query {
+ q = q.clone()
+ if offset < 0 {
+ q.err = errors.New("datastore: negative query offset")
+ return q
+ }
+ if offset > math.MaxInt32 {
+ q.err = errors.New("datastore: query offset overflow")
+ return q
+ }
+ q.offset = int32(offset)
+ return q
+}
+
+// Start returns a derivative query with the given start point.
+func (q *Query) Start(c Cursor) *Query {
+ q = q.clone()
+ if c.cc == nil {
+ q.err = errors.New("datastore: invalid cursor")
+ return q
+ }
+ q.start = c.cc
+ return q
+}
+
+// End returns a derivative query with the given end point.
+func (q *Query) End(c Cursor) *Query {
+ q = q.clone()
+ if c.cc == nil {
+ q.err = errors.New("datastore: invalid cursor")
+ return q
+ }
+ q.end = c.cc
+ return q
+}
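Editor's note: each of the builder methods above returns a derivative clone, so a query can be composed by chaining without mutating the original. A hedged sketch follows; the "Person" kind, the Height field, and the numbers are assumptions.

    package example // illustration only

    import "google.golang.org/appengine/datastore"

    // tallPeople builds (but does not run) a filtered, ordered, limited query.
    func tallPeople(root *datastore.Key) *datastore.Query {
        return datastore.NewQuery("Person").
            Ancestor(root).
            Filter("Height >=", int64(160)).
            Order("-Height").
            Limit(20)
    }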
+
+// toProto converts the query to a protocol buffer.
+func (q *Query) toProto(dst *pb.Query, appID string) error {
+ if len(q.projection) != 0 && q.keysOnly {
+ return errors.New("datastore: query cannot both project and be keys-only")
+ }
+ dst.Reset()
+ dst.App = proto.String(appID)
+ if q.kind != "" {
+ dst.Kind = proto.String(q.kind)
+ }
+ if q.ancestor != nil {
+ dst.Ancestor = keyToProto(appID, q.ancestor)
+ if q.eventual {
+ dst.Strong = proto.Bool(false)
+ }
+ }
+ if q.projection != nil {
+ dst.PropertyName = q.projection
+ if q.distinct {
+ dst.GroupByPropertyName = q.projection
+ }
+ }
+ if q.keysOnly {
+ dst.KeysOnly = proto.Bool(true)
+ dst.RequirePerfectPlan = proto.Bool(true)
+ }
+ for _, qf := range q.filter {
+ if qf.FieldName == "" {
+ return errors.New("datastore: empty query filter field name")
+ }
+ p, errStr := valueToProto(appID, qf.FieldName, reflect.ValueOf(qf.Value), false)
+ if errStr != "" {
+ return errors.New("datastore: bad query filter value type: " + errStr)
+ }
+ xf := &pb.Query_Filter{
+ Op: operatorToProto[qf.Op],
+ Property: []*pb.Property{p},
+ }
+ if xf.Op == nil {
+ return errors.New("datastore: unknown query filter operator")
+ }
+ dst.Filter = append(dst.Filter, xf)
+ }
+ for _, qo := range q.order {
+ if qo.FieldName == "" {
+ return errors.New("datastore: empty query order field name")
+ }
+ xo := &pb.Query_Order{
+ Property: proto.String(qo.FieldName),
+ Direction: sortDirectionToProto[qo.Direction],
+ }
+ if xo.Direction == nil {
+ return errors.New("datastore: unknown query order direction")
+ }
+ dst.Order = append(dst.Order, xo)
+ }
+ if q.limit >= 0 {
+ dst.Limit = proto.Int32(q.limit)
+ }
+ if q.offset != 0 {
+ dst.Offset = proto.Int32(q.offset)
+ }
+ dst.CompiledCursor = q.start
+ dst.EndCompiledCursor = q.end
+ dst.Compile = proto.Bool(true)
+ return nil
+}
+
+// Count returns the number of results for the query.
+func (q *Query) Count(c context.Context) (int, error) {
+ // Check that the query is well-formed.
+ if q.err != nil {
+ return 0, q.err
+ }
+
+ // Run a copy of the query, with keysOnly true (if we're not a projection,
+ // since the two are incompatible), and an adjusted offset. We also set the
+ // limit to zero, as we don't want any actual entity data, just the number
+ // of skipped results.
+ newQ := q.clone()
+ newQ.keysOnly = len(newQ.projection) == 0
+ newQ.limit = 0
+ if q.limit < 0 {
+ // If the original query was unlimited, set the new query's offset to maximum.
+ newQ.offset = math.MaxInt32
+ } else {
+ newQ.offset = q.offset + q.limit
+ if newQ.offset < 0 {
+ // Do the best we can, in the presence of overflow.
+ newQ.offset = math.MaxInt32
+ }
+ }
+ req := &pb.Query{}
+ if err := newQ.toProto(req, internal.FullyQualifiedAppID(c)); err != nil {
+ return 0, err
+ }
+ res := &pb.QueryResult{}
+ if err := internal.Call(c, "datastore_v3", "RunQuery", req, res); err != nil {
+ return 0, err
+ }
+
+ // n is the count we will return. For example, suppose that our original
+ // query had an offset of 4 and a limit of 2008: the count will be 2008,
+ // provided that there are at least 2012 matching entities. However, the
+ // RPCs will only skip 1000 results at a time. The RPC sequence is:
+ // call RunQuery with (offset, limit) = (2012, 0) // 2012 == newQ.offset
+ // response has (skippedResults, moreResults) = (1000, true)
+ // n += 1000 // n == 1000
+ // call Next with (offset, limit) = (1012, 0) // 1012 == newQ.offset - n
+ // response has (skippedResults, moreResults) = (1000, true)
+ // n += 1000 // n == 2000
+ // call Next with (offset, limit) = (12, 0) // 12 == newQ.offset - n
+ // response has (skippedResults, moreResults) = (12, false)
+ // n += 12 // n == 2012
+ // // exit the loop
+ // n -= 4 // n == 2008
+ var n int32
+ for {
+ // The QueryResult should have no actual entity data, just skipped results.
+ if len(res.Result) != 0 {
+ return 0, errors.New("datastore: internal error: Count request returned too much data")
+ }
+ n += res.GetSkippedResults()
+ if !res.GetMoreResults() {
+ break
+ }
+ if err := callNext(c, res, newQ.offset-n, 0); err != nil {
+ return 0, err
+ }
+ }
+ n -= q.offset
+ if n < 0 {
+ // If the offset was greater than the number of matching entities,
+ // return 0 instead of negative.
+ n = 0
+ }
+ return int(n), nil
+}
+
+// callNext issues a datastore_v3/Next RPC to advance a cursor, such as that
+// returned by a query with more results.
+func callNext(c context.Context, res *pb.QueryResult, offset, limit int32) error {
+ if res.Cursor == nil {
+ return errors.New("datastore: internal error: server did not return a cursor")
+ }
+ req := &pb.NextRequest{
+ Cursor: res.Cursor,
+ }
+ if limit >= 0 {
+ req.Count = proto.Int32(limit)
+ }
+ if offset != 0 {
+ req.Offset = proto.Int32(offset)
+ }
+ if res.CompiledCursor != nil {
+ req.Compile = proto.Bool(true)
+ }
+ res.Reset()
+ return internal.Call(c, "datastore_v3", "Next", req, res)
+}
+
+// GetAll runs the query in the given context and returns all keys that match
+// that query, as well as appending the values to dst.
+//
+// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non-
+// interface, non-pointer type P such that P or *P implements PropertyLoadSaver.
+//
+// As a special case, *PropertyList is an invalid type for dst, even though a
+// PropertyList is a slice of structs. It is treated as invalid to avoid being
+// mistakenly passed when *[]PropertyList was intended.
+//
+// The keys returned by GetAll will be in a 1-1 correspondence with the entities
+// added to dst.
+//
+// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys.
+func (q *Query) GetAll(c context.Context, dst interface{}) ([]*Key, error) {
+ var (
+ dv reflect.Value
+ mat multiArgType
+ elemType reflect.Type
+ errFieldMismatch error
+ )
+ if !q.keysOnly {
+ dv = reflect.ValueOf(dst)
+ if dv.Kind() != reflect.Ptr || dv.IsNil() {
+ return nil, ErrInvalidEntityType
+ }
+ dv = dv.Elem()
+ mat, elemType = checkMultiArg(dv)
+ if mat == multiArgTypeInvalid || mat == multiArgTypeInterface {
+ return nil, ErrInvalidEntityType
+ }
+ }
+
+ var keys []*Key
+ for t := q.Run(c); ; {
+ k, e, err := t.next()
+ if err == Done {
+ break
+ }
+ if err != nil {
+ return keys, err
+ }
+ if !q.keysOnly {
+ ev := reflect.New(elemType)
+ if elemType.Kind() == reflect.Map {
+ // This is a special case. The zero values of a map type are
+ // not immediately useful; they have to be make'd.
+ //
+ // Funcs and channels are similar, in that a zero value is not useful,
+ // but even a freshly make'd channel isn't useful: there's no fixed
+ // channel buffer size that is always going to be large enough, and
+ // there's no goroutine to drain the other end. Theoretically, these
+ // types could be supported, for example by sniffing for a constructor
+ // method or requiring prior registration, but for now it's not a
+ // frequent enough concern to be worth it. Programmers can work around
+ // it by explicitly using Iterator.Next instead of the Query.GetAll
+ // convenience method.
+ x := reflect.MakeMap(elemType)
+ ev.Elem().Set(x)
+ }
+ if err = loadEntity(ev.Interface(), e); err != nil {
+ if _, ok := err.(*ErrFieldMismatch); ok {
+ // We continue loading entities even in the face of field mismatch errors.
+ // If we encounter any other error, that other error is returned. Otherwise,
+ // an ErrFieldMismatch is returned.
+ errFieldMismatch = err
+ } else {
+ return keys, err
+ }
+ }
+ if mat != multiArgTypeStructPtr {
+ ev = ev.Elem()
+ }
+ dv.Set(reflect.Append(dv, ev))
+ }
+ keys = append(keys, k)
+ }
+ return keys, errFieldMismatch
+}
+
+// Run runs the query in the given context.
+func (q *Query) Run(c context.Context) *Iterator {
+ if q.err != nil {
+ return &Iterator{err: q.err}
+ }
+ t := &Iterator{
+ c: c,
+ limit: q.limit,
+ q: q,
+ prevCC: q.start,
+ }
+ var req pb.Query
+ if err := q.toProto(&req, internal.FullyQualifiedAppID(c)); err != nil {
+ t.err = err
+ return t
+ }
+ if err := internal.Call(c, "datastore_v3", "RunQuery", &req, &t.res); err != nil {
+ t.err = err
+ return t
+ }
+ offset := q.offset - t.res.GetSkippedResults()
+ for offset > 0 && t.res.GetMoreResults() {
+ t.prevCC = t.res.CompiledCursor
+ if err := callNext(t.c, &t.res, offset, t.limit); err != nil {
+ t.err = err
+ break
+ }
+ skip := t.res.GetSkippedResults()
+ if skip < 0 {
+ t.err = errors.New("datastore: internal error: negative number of skipped_results")
+ break
+ }
+ offset -= skip
+ }
+ if offset < 0 {
+ t.err = errors.New("datastore: internal error: query offset was overshot")
+ }
+ return t
+}
+
+// Iterator is the result of running a query.
+type Iterator struct {
+ c context.Context
+ err error
+ // res is the result of the most recent RunQuery or Next API call.
+ res pb.QueryResult
+ // i is how many elements of res.Result we have iterated over.
+ i int
+ // limit is the limit on the number of results this iterator should return.
+ // A negative value means unlimited.
+ limit int32
+ // q is the original query which yielded this iterator.
+ q *Query
+ // prevCC is the compiled cursor that marks the end of the previous batch
+ // of results.
+ prevCC *pb.CompiledCursor
+}
+
+// Done is returned when a query iteration has completed.
+var Done = errors.New("datastore: query has no more results")
+
+// Next returns the key of the next result. When there are no more results,
+// Done is returned as the error.
+//
+// If the query is not keys only and dst is non-nil, it also loads the entity
+// stored for that key into the struct pointer or PropertyLoadSaver dst, with
+// the same semantics and possible errors as for the Get function.
+func (t *Iterator) Next(dst interface{}) (*Key, error) {
+ k, e, err := t.next()
+ if err != nil {
+ return nil, err
+ }
+ if dst != nil && !t.q.keysOnly {
+ err = loadEntity(dst, e)
+ }
+ return k, err
+}
+
+func (t *Iterator) next() (*Key, *pb.EntityProto, error) {
+ if t.err != nil {
+ return nil, nil, t.err
+ }
+
+ // Issue datastore_v3/Next RPCs as necessary.
+ for t.i == len(t.res.Result) {
+ if !t.res.GetMoreResults() {
+ t.err = Done
+ return nil, nil, t.err
+ }
+ t.prevCC = t.res.CompiledCursor
+ if err := callNext(t.c, &t.res, 0, t.limit); err != nil {
+ t.err = err
+ return nil, nil, t.err
+ }
+ if t.res.GetSkippedResults() != 0 {
+ t.err = errors.New("datastore: internal error: iterator has skipped results")
+ return nil, nil, t.err
+ }
+ t.i = 0
+ if t.limit >= 0 {
+ t.limit -= int32(len(t.res.Result))
+ if t.limit < 0 {
+ t.err = errors.New("datastore: internal error: query returned more results than the limit")
+ return nil, nil, t.err
+ }
+ }
+ }
+
+ // Extract the key from the t.i'th element of t.res.Result.
+ e := t.res.Result[t.i]
+ t.i++
+ if e.Key == nil {
+ return nil, nil, errors.New("datastore: internal error: server did not return a key")
+ }
+ k, err := protoToKey(e.Key)
+ if err != nil || k.Incomplete() {
+ return nil, nil, errors.New("datastore: internal error: server returned an invalid key")
+ }
+ return k, e, nil
+}
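Editor's note: the usual consumption pattern for Run is a loop over Next that stops at Done. A minimal sketch, with the Greeting type assumed for illustration:

    package example // illustration only

    import (
        "golang.org/x/net/context"

        "google.golang.org/appengine/datastore"
    )

    type Greeting struct {
        Author  string
        Content string
    }

    func listGreetings(c context.Context, q *datastore.Query) ([]*Greeting, error) {
        var out []*Greeting
        for t := q.Run(c); ; {
            var g Greeting
            _, err := t.Next(&g)
            if err == datastore.Done {
                return out, nil // iteration finished
            }
            if err != nil {
                return nil, err
            }
            out = append(out, &g)
        }
    }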
+
+// Cursor returns a cursor for the iterator's current location.
+func (t *Iterator) Cursor() (Cursor, error) {
+ if t.err != nil && t.err != Done {
+ return Cursor{}, t.err
+ }
+ // If we are at either end of the current batch of results,
+ // return the compiled cursor at that end.
+ skipped := t.res.GetSkippedResults()
+ if t.i == 0 && skipped == 0 {
+ if t.prevCC == nil {
+ // A nil pointer (of type *pb.CompiledCursor) means no constraint:
+ // passing it as the end cursor of a new query means unlimited results
+ // (glossing over the integer limit parameter for now).
+ // A non-nil pointer to an empty pb.CompiledCursor means the start:
+ // passing it as the end cursor of a new query means 0 results.
+ // If prevCC was nil, then the original query had no start cursor, but
+ // Iterator.Cursor should return "the start" instead of unlimited.
+ return Cursor{&zeroCC}, nil
+ }
+ return Cursor{t.prevCC}, nil
+ }
+ if t.i == len(t.res.Result) {
+ return Cursor{t.res.CompiledCursor}, nil
+ }
+ // Otherwise, re-run the query offset to this iterator's position, starting from
+ // the most recent compiled cursor. This is done on a best-effort basis, as it
+ // is racy; if a concurrent process has added or removed entities, then the
+ // cursor returned may be inconsistent.
+ q := t.q.clone()
+ q.start = t.prevCC
+ q.offset = skipped + int32(t.i)
+ q.limit = 0
+ q.keysOnly = len(q.projection) == 0
+ t1 := q.Run(t.c)
+ _, _, err := t1.next()
+ if err != Done {
+ if err == nil {
+ err = fmt.Errorf("datastore: internal error: zero-limit query did not have zero results")
+ }
+ return Cursor{}, err
+ }
+ return Cursor{t1.res.CompiledCursor}, nil
+}
+
+var zeroCC pb.CompiledCursor
+
+// Cursor is an iterator's position. It can be converted to and from an opaque
+// string. A cursor can be used from different HTTP requests, but only with a
+// query with the same kind, ancestor, filter and order constraints.
+type Cursor struct {
+ cc *pb.CompiledCursor
+}
+
+// String returns a base-64 string representation of a cursor.
+func (c Cursor) String() string {
+ if c.cc == nil {
+ return ""
+ }
+ b, err := proto.Marshal(c.cc)
+ if err != nil {
+ // The only way to construct a Cursor with a non-nil cc field is to
+ // unmarshal from the byte representation. We panic if the unmarshal
+ // succeeds but the marshaling of the unchanged protobuf value fails.
+ panic(fmt.Sprintf("datastore: internal error: malformed cursor: %v", err))
+ }
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// DecodeCursor decodes a cursor from its base-64 string representation.
+func DecodeCursor(s string) (Cursor, error) {
+ if s == "" {
+ return Cursor{&zeroCC}, nil
+ }
+ if n := len(s) % 4; n != 0 {
+ s += strings.Repeat("=", 4-n)
+ }
+ b, err := base64.URLEncoding.DecodeString(s)
+ if err != nil {
+ return Cursor{}, err
+ }
+ cc := &pb.CompiledCursor{}
+ if err := proto.Unmarshal(b, cc); err != nil {
+ return Cursor{}, err
+ }
+ return Cursor{cc}, nil
+}
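Editor's note: Cursor.String and DecodeCursor let an iterator position survive an HTTP round trip, for example as a URL parameter. A hedged sketch of paging with them; the page size and the "Greeting" kind are assumptions.

    package example // illustration only

    import (
        "golang.org/x/net/context"

        "google.golang.org/appengine/datastore"
    )

    // nextPage returns up to one page of keys plus an opaque cursor for the
    // following request.
    func nextPage(c context.Context, cursorStr string) ([]*datastore.Key, string, error) {
        q := datastore.NewQuery("Greeting").KeysOnly().Limit(10)
        if cursorStr != "" {
            cur, err := datastore.DecodeCursor(cursorStr)
            if err != nil {
                return nil, "", err
            }
            q = q.Start(cur)
        }
        var keys []*datastore.Key
        t := q.Run(c)
        for {
            k, err := t.Next(nil)
            if err == datastore.Done {
                break
            }
            if err != nil {
                return nil, "", err
            }
            keys = append(keys, k)
        }
        cur, err := t.Cursor()
        if err != nil {
            return nil, "", err
        }
        return keys, cur.String(), nil
    }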
diff --git a/vendor/google.golang.org/appengine/datastore/save.go b/vendor/google.golang.org/appengine/datastore/save.go
new file mode 100644
index 000000000..6aeffb631
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/save.go
@@ -0,0 +1,300 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+func toUnixMicro(t time.Time) int64 {
+ // We cannot use t.UnixNano() / 1e3 because we want to handle times more than
+ // 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot
+ // be represented in the numerator of a single int64 divide.
+ return t.Unix()*1e6 + int64(t.Nanosecond()/1e3)
+}
+
+func fromUnixMicro(t int64) time.Time {
+ return time.Unix(t/1e6, (t%1e6)*1e3)
+}
+
+var (
+ minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3)
+ maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3)
+)
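Editor's note: as a sanity check on the reasoning above, a date a few centuries out overflows int64 nanoseconds but survives the microsecond round trip. A small in-package sketch (toUnixMicro and fromUnixMicro are unexported, so this only compiles inside this package):

    // Year 2500 is roughly 1.7e19 ns after 1970, which does not fit in an int64,
    // yet the microsecond representation round-trips it without loss.
    func microRoundTripOK() bool {
        t := time.Date(2500, 1, 1, 0, 0, 0, 0, time.UTC)
        return fromUnixMicro(toUnixMicro(t)).Equal(t)
    }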
+
+// valueToProto converts a named value to a newly allocated Property.
+// The returned error string is empty on success.
+func valueToProto(defaultAppID, name string, v reflect.Value, multiple bool) (p *pb.Property, errStr string) {
+ var (
+ pv pb.PropertyValue
+ unsupported bool
+ )
+ switch v.Kind() {
+ case reflect.Invalid:
+ // No-op.
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ pv.Int64Value = proto.Int64(v.Int())
+ case reflect.Bool:
+ pv.BooleanValue = proto.Bool(v.Bool())
+ case reflect.String:
+ pv.StringValue = proto.String(v.String())
+ case reflect.Float32, reflect.Float64:
+ pv.DoubleValue = proto.Float64(v.Float())
+ case reflect.Ptr:
+ if k, ok := v.Interface().(*Key); ok {
+ if k != nil {
+ pv.Referencevalue = keyToReferenceValue(defaultAppID, k)
+ }
+ } else {
+ unsupported = true
+ }
+ case reflect.Struct:
+ switch t := v.Interface().(type) {
+ case time.Time:
+ if t.Before(minTime) || t.After(maxTime) {
+ return nil, "time value out of range"
+ }
+ pv.Int64Value = proto.Int64(toUnixMicro(t))
+ case appengine.GeoPoint:
+ if !t.Valid() {
+ return nil, "invalid GeoPoint value"
+ }
+ // NOTE: Strangely, latitude maps to X, longitude to Y.
+ pv.Pointvalue = &pb.PropertyValue_PointValue{X: &t.Lat, Y: &t.Lng}
+ default:
+ unsupported = true
+ }
+ case reflect.Slice:
+ if b, ok := v.Interface().([]byte); ok {
+ pv.StringValue = proto.String(string(b))
+ } else {
+ // nvToProto should already catch slice values.
+ // If we get here, we have a slice of slice values.
+ unsupported = true
+ }
+ default:
+ unsupported = true
+ }
+ if unsupported {
+ return nil, "unsupported datastore value type: " + v.Type().String()
+ }
+ p = &pb.Property{
+ Name: proto.String(name),
+ Value: &pv,
+ Multiple: proto.Bool(multiple),
+ }
+ if v.IsValid() {
+ switch v.Interface().(type) {
+ case []byte:
+ p.Meaning = pb.Property_BLOB.Enum()
+ case ByteString:
+ p.Meaning = pb.Property_BYTESTRING.Enum()
+ case appengine.BlobKey:
+ p.Meaning = pb.Property_BLOBKEY.Enum()
+ case time.Time:
+ p.Meaning = pb.Property_GD_WHEN.Enum()
+ case appengine.GeoPoint:
+ p.Meaning = pb.Property_GEORSS_POINT.Enum()
+ }
+ }
+ return p, ""
+}
+
+// saveEntity converts a PropertyLoadSaver or struct pointer into an EntityProto.
+func saveEntity(defaultAppID string, key *Key, src interface{}) (*pb.EntityProto, error) {
+ var err error
+ var props []Property
+ if e, ok := src.(PropertyLoadSaver); ok {
+ props, err = e.Save()
+ } else {
+ props, err = SaveStruct(src)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return propertiesToProto(defaultAppID, key, props)
+}
+
+func saveStructProperty(props *[]Property, name string, noIndex, multiple bool, v reflect.Value) error {
+ p := Property{
+ Name: name,
+ NoIndex: noIndex,
+ Multiple: multiple,
+ }
+ switch x := v.Interface().(type) {
+ case *Key:
+ p.Value = x
+ case time.Time:
+ p.Value = x
+ case appengine.BlobKey:
+ p.Value = x
+ case appengine.GeoPoint:
+ p.Value = x
+ case ByteString:
+ p.Value = x
+ default:
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p.Value = v.Int()
+ case reflect.Bool:
+ p.Value = v.Bool()
+ case reflect.String:
+ p.Value = v.String()
+ case reflect.Float32, reflect.Float64:
+ p.Value = v.Float()
+ case reflect.Slice:
+ if v.Type().Elem().Kind() == reflect.Uint8 {
+ p.NoIndex = true
+ p.Value = v.Bytes()
+ }
+ case reflect.Struct:
+ if !v.CanAddr() {
+ return fmt.Errorf("datastore: unsupported struct field: value is unaddressable")
+ }
+ sub, err := newStructPLS(v.Addr().Interface())
+ if err != nil {
+ return fmt.Errorf("datastore: unsupported struct field: %v", err)
+ }
+ return sub.(structPLS).save(props, name, noIndex, multiple)
+ }
+ }
+ if p.Value == nil {
+ return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type())
+ }
+ *props = append(*props, p)
+ return nil
+}
+
+func (s structPLS) Save() ([]Property, error) {
+ var props []Property
+ if err := s.save(&props, "", false, false); err != nil {
+ return nil, err
+ }
+ return props, nil
+}
+
+func (s structPLS) save(props *[]Property, prefix string, noIndex, multiple bool) error {
+ for i, t := range s.codec.byIndex {
+ if t.name == "-" {
+ continue
+ }
+ name := t.name
+ if prefix != "" {
+ name = prefix + name
+ }
+ v := s.v.Field(i)
+ if !v.IsValid() || !v.CanSet() {
+ continue
+ }
+ noIndex1 := noIndex || t.noIndex
+ // For slice fields that aren't []byte, save each element.
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+ for j := 0; j < v.Len(); j++ {
+ if err := saveStructProperty(props, name, noIndex1, true, v.Index(j)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ // Otherwise, save the field itself.
+ if err := saveStructProperty(props, name, noIndex1, multiple, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func propertiesToProto(defaultAppID string, key *Key, props []Property) (*pb.EntityProto, error) {
+ e := &pb.EntityProto{
+ Key: keyToProto(defaultAppID, key),
+ }
+ if key.parent == nil {
+ e.EntityGroup = &pb.Path{}
+ } else {
+ e.EntityGroup = keyToProto(defaultAppID, key.root()).Path
+ }
+ prevMultiple := make(map[string]bool)
+
+ for _, p := range props {
+ if pm, ok := prevMultiple[p.Name]; ok {
+ if !pm || !p.Multiple {
+ return nil, fmt.Errorf("datastore: multiple Properties with Name %q, but Multiple is false", p.Name)
+ }
+ } else {
+ prevMultiple[p.Name] = p.Multiple
+ }
+
+ x := &pb.Property{
+ Name: proto.String(p.Name),
+ Value: new(pb.PropertyValue),
+ Multiple: proto.Bool(p.Multiple),
+ }
+ switch v := p.Value.(type) {
+ case int64:
+ x.Value.Int64Value = proto.Int64(v)
+ case bool:
+ x.Value.BooleanValue = proto.Bool(v)
+ case string:
+ x.Value.StringValue = proto.String(v)
+ if p.NoIndex {
+ x.Meaning = pb.Property_TEXT.Enum()
+ }
+ case float64:
+ x.Value.DoubleValue = proto.Float64(v)
+ case *Key:
+ if v != nil {
+ x.Value.Referencevalue = keyToReferenceValue(defaultAppID, v)
+ }
+ case time.Time:
+ if v.Before(minTime) || v.After(maxTime) {
+ return nil, fmt.Errorf("datastore: time value out of range")
+ }
+ x.Value.Int64Value = proto.Int64(toUnixMicro(v))
+ x.Meaning = pb.Property_GD_WHEN.Enum()
+ case appengine.BlobKey:
+ x.Value.StringValue = proto.String(string(v))
+ x.Meaning = pb.Property_BLOBKEY.Enum()
+ case appengine.GeoPoint:
+ if !v.Valid() {
+ return nil, fmt.Errorf("datastore: invalid GeoPoint value")
+ }
+ // NOTE: Strangely, latitude maps to X, longitude to Y.
+ x.Value.Pointvalue = &pb.PropertyValue_PointValue{X: &v.Lat, Y: &v.Lng}
+ x.Meaning = pb.Property_GEORSS_POINT.Enum()
+ case []byte:
+ x.Value.StringValue = proto.String(string(v))
+ x.Meaning = pb.Property_BLOB.Enum()
+ if !p.NoIndex {
+ return nil, fmt.Errorf("datastore: cannot index a []byte valued Property with Name %q", p.Name)
+ }
+ case ByteString:
+ x.Value.StringValue = proto.String(string(v))
+ x.Meaning = pb.Property_BYTESTRING.Enum()
+ default:
+ if p.Value != nil {
+ return nil, fmt.Errorf("datastore: invalid Value type for a Property with Name %q", p.Name)
+ }
+ }
+
+ if p.NoIndex {
+ e.RawProperty = append(e.RawProperty, x)
+ } else {
+ e.Property = append(e.Property, x)
+ if len(e.Property) > maxIndexedProperties {
+ return nil, errors.New("datastore: too many indexed properties")
+ }
+ }
+ }
+ return e, nil
+}
diff --git a/vendor/google.golang.org/appengine/datastore/transaction.go b/vendor/google.golang.org/appengine/datastore/transaction.go
new file mode 100644
index 000000000..bb4f2562f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/transaction.go
@@ -0,0 +1,79 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "errors"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+func init() {
+ internal.RegisterTransactionSetter(func(x *pb.Query, t *pb.Transaction) {
+ x.Transaction = t
+ })
+ internal.RegisterTransactionSetter(func(x *pb.GetRequest, t *pb.Transaction) {
+ x.Transaction = t
+ })
+ internal.RegisterTransactionSetter(func(x *pb.PutRequest, t *pb.Transaction) {
+ x.Transaction = t
+ })
+ internal.RegisterTransactionSetter(func(x *pb.DeleteRequest, t *pb.Transaction) {
+ x.Transaction = t
+ })
+}
+
+// ErrConcurrentTransaction is returned when a transaction is rolled back due
+// to a conflict with a concurrent transaction.
+var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction")
+
+// RunInTransaction runs f in a transaction. It calls f with a transaction
+// context tc that f should use for all App Engine operations.
+//
+// If f returns nil, RunInTransaction attempts to commit the transaction,
+// returning nil if it succeeds. If the commit fails due to a conflicting
+// transaction, RunInTransaction retries f, each time with a new transaction
+// context. It gives up and returns ErrConcurrentTransaction after three
+// failed attempts.
+//
+// If f returns non-nil, then any datastore changes will not be applied and
+// RunInTransaction returns that same error. The function f is not retried.
+//
+// Note that when f returns, the transaction is not yet committed. Calling code
+// must be careful not to assume that any of f's changes have been committed
+// until RunInTransaction returns nil.
+//
+// Since f may be called multiple times, f should usually be idempotent.
+// datastore.Get is not idempotent when unmarshaling slice fields.
+//
+// Nested transactions are not supported; c may not be a transaction context.
+func RunInTransaction(c context.Context, f func(tc context.Context) error, opts *TransactionOptions) error {
+ xg := false
+ if opts != nil {
+ xg = opts.XG
+ }
+ for i := 0; i < 3; i++ {
+ if err := internal.RunTransactionOnce(c, f, xg); err != internal.ErrConcurrentTransaction {
+ return err
+ }
+ }
+ return ErrConcurrentTransaction
+}
+
+// TransactionOptions are the options for running a transaction.
+type TransactionOptions struct {
+ // XG is whether the transaction can cross multiple entity groups. In
+ // comparison, a single group transaction is one where all datastore keys
+ // used have the same root key. Note that cross group transactions do not
+ // have the same behavior as single group transactions. In particular, it
+ // is much more likely to see partially applied transactions in different
+ // entity groups, in global queries.
+ // It is valid to set XG to true even if the transaction is within a
+ // single entity group.
+ XG bool
+}
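Editor's note: a typical use of RunInTransaction is a read-modify-write on a single entity. A hedged sketch, assuming the package's Get/Put/ErrNoSuchEntity API and an invented Counter type:

    package example // illustration only

    import (
        "golang.org/x/net/context"

        "google.golang.org/appengine/datastore"
    )

    // Counter is an invented entity type.
    type Counter struct {
        Count int64
    }

    // increment is retried automatically on conflicts by RunInTransaction.
    func increment(c context.Context, key *datastore.Key) error {
        return datastore.RunInTransaction(c, func(tc context.Context) error {
            var ctr Counter
            if err := datastore.Get(tc, key, &ctr); err != nil && err != datastore.ErrNoSuchEntity {
                return err
            }
            ctr.Count++
            _, err := datastore.Put(tc, key, &ctr)
            return err
        }, nil)
    }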
diff --git a/vendor/google.golang.org/appengine/delay/delay.go b/vendor/google.golang.org/appengine/delay/delay.go
new file mode 100644
index 000000000..6ef8937f6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/delay/delay.go
@@ -0,0 +1,278 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package delay provides a way to execute code outside the scope of a
+user request by using the taskqueue API.
+
+To declare a function that may be executed later, call Func
+in a top-level assignment context, passing it an arbitrary string key
+and a function whose first argument is of type context.Context.
+ var laterFunc = delay.Func("key", myFunc)
+It is also possible to use a function literal.
+ var laterFunc = delay.Func("key", func(c context.Context, x string) {
+ // ...
+ })
+
+To call a function, invoke its Call method.
+ laterFunc.Call(c, "something")
+A function may be called any number of times. If the function has any
+return arguments, and the last one is of type error, the function may
+return a non-nil error to signal that the function should be retried.
+
+The arguments to functions may be of any type that is encodable by the gob
+package. If an argument is of interface type, it is the client's responsibility
+to register with the gob package whatever concrete type may be passed for that
+argument; see http://golang.org/pkg/gob/#Register for details.
+
+Any errors during initialization or execution of a function will be
+logged to the application logs. Error logs that occur during initialization will
+be associated with the request that invoked the Call method.
+
+The state of a function invocation that has not yet successfully
+executed is preserved by combining the file name in which it is declared
+with the string key that was passed to the Func function. Updating an app
+with pending function invocations is safe as long as the relevant
+functions have the (filename, key) combination preserved.
+
+The delay package uses the Task Queue API to create tasks that call the
+reserved application path "/_ah/queue/go/delay".
+This path must not be marked as "login: required" in app.yaml;
+it must be marked as "login: admin" or have no access restriction.
+*/
+package delay
+
+import (
+ "bytes"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "net/http"
+ "reflect"
+ "runtime"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/log"
+ "google.golang.org/appengine/taskqueue"
+)
+
+// Function represents a function that may have a delayed invocation.
+type Function struct {
+ fv reflect.Value // Kind() == reflect.Func
+ key string
+ err error // any error during initialization
+}
+
+const (
+ // The HTTP path for invocations.
+ path = "/_ah/queue/go/delay"
+ // Use the default queue.
+ queue = ""
+)
+
+var (
+ // registry of all delayed functions
+ funcs = make(map[string]*Function)
+
+ // precomputed types
+ contextType = reflect.TypeOf((*context.Context)(nil)).Elem()
+ errorType = reflect.TypeOf((*error)(nil)).Elem()
+
+ // errors
+ errFirstArg = errors.New("first argument must be context.Context")
+)
+
+// Func declares a new Function. The second argument must be a function with a
+// first argument of type context.Context.
+// This function must be called at program initialization time. That means it
+// must be called in a global variable declaration or from an init function.
+// This restriction is necessary because the instance that delays a function
+// call may not be the one that executes it. Only the code executed at program
+// initialization time is guaranteed to have been run by an instance before it
+// receives a request.
+func Func(key string, i interface{}) *Function {
+ f := &Function{fv: reflect.ValueOf(i)}
+
+ // Derive unique, somewhat stable key for this func.
+ _, file, _, _ := runtime.Caller(1)
+ f.key = file + ":" + key
+
+ t := f.fv.Type()
+ if t.Kind() != reflect.Func {
+ f.err = errors.New("not a function")
+ return f
+ }
+ if t.NumIn() == 0 || t.In(0) != contextType {
+ f.err = errFirstArg
+ return f
+ }
+
+ // Register the function's arguments with the gob package.
+ // This is required because they are marshaled inside a []interface{}.
+ // gob.Register only expects to be called during initialization;
+ // that's fine because this function expects the same.
+ for i := 0; i < t.NumIn(); i++ {
+ // Only concrete types may be registered. If the argument has
+		// interface type, the client is responsible for registering the
+ // concrete types it will hold.
+ if t.In(i).Kind() == reflect.Interface {
+ continue
+ }
+ gob.Register(reflect.Zero(t.In(i)).Interface())
+ }
+
+ funcs[f.key] = f
+ return f
+}
+
+type invocation struct {
+ Key string
+ Args []interface{}
+}
+
+// Call invokes a delayed function.
+// f.Call(c, ...)
+// is equivalent to
+// t, _ := f.Task(...)
+// taskqueue.Add(c, t, "")
+func (f *Function) Call(c context.Context, args ...interface{}) {
+ t, err := f.Task(args...)
+ if err != nil {
+ log.Errorf(c, "%v", err)
+ return
+ }
+ if _, err := taskqueueAdder(c, t, queue); err != nil {
+ log.Errorf(c, "delay: taskqueue.Add failed: %v", err)
+ return
+ }
+}
+
+// Task creates a Task that will invoke the function.
+// Its parameters may be tweaked before adding it to a queue.
+// Users should not modify the Path or Payload fields of the returned Task.
+func (f *Function) Task(args ...interface{}) (*taskqueue.Task, error) {
+ if f.err != nil {
+ return nil, fmt.Errorf("delay: func is invalid: %v", f.err)
+ }
+
+ nArgs := len(args) + 1 // +1 for the context.Context
+ ft := f.fv.Type()
+ minArgs := ft.NumIn()
+ if ft.IsVariadic() {
+ minArgs--
+ }
+ if nArgs < minArgs {
+ return nil, fmt.Errorf("delay: too few arguments to func: %d < %d", nArgs, minArgs)
+ }
+ if !ft.IsVariadic() && nArgs > minArgs {
+ return nil, fmt.Errorf("delay: too many arguments to func: %d > %d", nArgs, minArgs)
+ }
+
+ // Check arg types.
+ for i := 1; i < nArgs; i++ {
+ at := reflect.TypeOf(args[i-1])
+ var dt reflect.Type
+ if i < minArgs {
+ // not a variadic arg
+ dt = ft.In(i)
+ } else {
+ // a variadic arg
+ dt = ft.In(minArgs).Elem()
+ }
+ // nil arguments won't have a type, so they need special handling.
+ if at == nil {
+ // nil interface
+ switch dt.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ continue // may be nil
+ }
+ return nil, fmt.Errorf("delay: argument %d has wrong type: %v is not nilable", i, dt)
+ }
+ switch at.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ av := reflect.ValueOf(args[i-1])
+ if av.IsNil() {
+ // nil value in interface; not supported by gob, so we replace it
+ // with a nil interface value
+ args[i-1] = nil
+ }
+ }
+ if !at.AssignableTo(dt) {
+ return nil, fmt.Errorf("delay: argument %d has wrong type: %v is not assignable to %v", i, at, dt)
+ }
+ }
+
+ inv := invocation{
+ Key: f.key,
+ Args: args,
+ }
+
+ buf := new(bytes.Buffer)
+ if err := gob.NewEncoder(buf).Encode(inv); err != nil {
+ return nil, fmt.Errorf("delay: gob encoding failed: %v", err)
+ }
+
+ return &taskqueue.Task{
+ Path: path,
+ Payload: buf.Bytes(),
+ }, nil
+}
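Editor's note: Call is the one-liner, while Task lets the task be tweaked before it is enqueued manually. A hedged sketch; the Delay field comes from the taskqueue package's Task type, and the function, delay duration, and default queue name are assumptions for illustration.

    package example // illustration only

    import (
        "time"

        "golang.org/x/net/context"

        "google.golang.org/appengine/delay"
        "google.golang.org/appengine/taskqueue"
    )

    // expensiveFunc is declared at init time, as Func requires.
    var expensiveFunc = delay.Func("expensive", func(c context.Context, n int) error {
        // real work would go here
        return nil
    })

    // enqueueLater schedules the invocation to run later than Call would.
    func enqueueLater(c context.Context, n int) error {
        t, err := expensiveFunc.Task(n)
        if err != nil {
            return err
        }
        t.Delay = 10 * time.Minute // taskqueue.Task field; Path and Payload stay untouched
        _, err = taskqueue.Add(c, t, "")
        return err
    }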
+
+var taskqueueAdder = taskqueue.Add // for testing
+
+func init() {
+ http.HandleFunc(path, func(w http.ResponseWriter, req *http.Request) {
+ runFunc(appengine.NewContext(req), w, req)
+ })
+}
+
+func runFunc(c context.Context, w http.ResponseWriter, req *http.Request) {
+ defer req.Body.Close()
+
+ var inv invocation
+ if err := gob.NewDecoder(req.Body).Decode(&inv); err != nil {
+ log.Errorf(c, "delay: failed decoding task payload: %v", err)
+ log.Warningf(c, "delay: dropping task")
+ return
+ }
+
+ f := funcs[inv.Key]
+ if f == nil {
+ log.Errorf(c, "delay: no func with key %q found", inv.Key)
+ log.Warningf(c, "delay: dropping task")
+ return
+ }
+
+ ft := f.fv.Type()
+ in := []reflect.Value{reflect.ValueOf(c)}
+ for _, arg := range inv.Args {
+ var v reflect.Value
+ if arg != nil {
+ v = reflect.ValueOf(arg)
+ } else {
+ // Task was passed a nil argument, so we must construct
+ // the zero value for the argument here.
+ n := len(in) // we're constructing the nth argument
+ var at reflect.Type
+ if !ft.IsVariadic() || n < ft.NumIn()-1 {
+ at = ft.In(n)
+ } else {
+ at = ft.In(ft.NumIn() - 1).Elem()
+ }
+ v = reflect.Zero(at)
+ }
+ in = append(in, v)
+ }
+ out := f.fv.Call(in)
+
+ if n := ft.NumOut(); n > 0 && ft.Out(n-1) == errorType {
+ if errv := out[n-1]; !errv.IsNil() {
+ log.Errorf(c, "delay: func failed (will retry): %v", errv.Interface())
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/demos/guestbook/app.yaml b/vendor/google.golang.org/appengine/demos/guestbook/app.yaml
new file mode 100644
index 000000000..783713951
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/guestbook/app.yaml
@@ -0,0 +1,19 @@
+# Demo application for Managed VMs.
+application: vm-guestbook
+version: 1
+runtime: go
+vm: true
+api_version: go1
+
+manual_scaling:
+ instances: 1
+
+handlers:
+# Favicon. Without this, the browser hits this once per page view.
+- url: /favicon.ico
+ static_files: favicon.ico
+ upload: favicon.ico
+
+# Main app. All the real work is here.
+- url: /.*
+ script: _go_app
diff --git a/vendor/google.golang.org/appengine/demos/guestbook/favicon.ico b/vendor/google.golang.org/appengine/demos/guestbook/favicon.ico
new file mode 100644
index 000000000..1a71ea772
Binary files /dev/null and b/vendor/google.golang.org/appengine/demos/guestbook/favicon.ico differ
diff --git a/vendor/google.golang.org/appengine/demos/guestbook/guestbook.go b/vendor/google.golang.org/appengine/demos/guestbook/guestbook.go
new file mode 100644
index 000000000..906948457
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/guestbook/guestbook.go
@@ -0,0 +1,105 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package guestbook
+
+import (
+ "html/template"
+ "net/http"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/datastore"
+ "google.golang.org/appengine/log"
+ "google.golang.org/appengine/user"
+)
+
+var initTime time.Time
+
+type Greeting struct {
+ Author string
+ Content string
+ Date time.Time
+}
+
+func init() {
+ http.HandleFunc("/", handleMainPage)
+ http.HandleFunc("/sign", handleSign)
+}
+
+// guestbookKey returns the key used for all guestbook entries.
+func guestbookKey(c context.Context) *datastore.Key {
+ // The string "default_guestbook" here could be varied to have multiple guestbooks.
+ return datastore.NewKey(c, "Guestbook", "default_guestbook", 0, nil)
+}
+
+var tpl = template.Must(template.ParseGlob("templates/*.html"))
+
+func handleMainPage(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "GET" {
+ http.Error(w, "GET requests only", http.StatusMethodNotAllowed)
+ return
+ }
+ if r.URL.Path != "/" {
+ http.NotFound(w, r)
+ return
+ }
+
+ c := appengine.NewContext(r)
+ tic := time.Now()
+ q := datastore.NewQuery("Greeting").Ancestor(guestbookKey(c)).Order("-Date").Limit(10)
+ var gg []*Greeting
+ if _, err := q.GetAll(c, &gg); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ log.Errorf(c, "GetAll: %v", err)
+ return
+ }
+ log.Infof(c, "Datastore lookup took %s", time.Since(tic).String())
+ log.Infof(c, "Rendering %d greetings", len(gg))
+
+ var email, logout, login string
+ if u := user.Current(c); u != nil {
+ logout, _ = user.LogoutURL(c, "/")
+ email = u.Email
+ } else {
+ login, _ = user.LoginURL(c, "/")
+ }
+ data := struct {
+ Greetings []*Greeting
+ Login, Logout, Email string
+ }{
+ Greetings: gg,
+ Login: login,
+ Logout: logout,
+ Email: email,
+ }
+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
+ if err := tpl.ExecuteTemplate(w, "guestbook.html", data); err != nil {
+ log.Errorf(c, "%v", err)
+ }
+}
+
+func handleSign(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "POST" {
+ http.Error(w, "POST requests only", http.StatusMethodNotAllowed)
+ return
+ }
+ c := appengine.NewContext(r)
+ g := &Greeting{
+ Content: r.FormValue("content"),
+ Date: time.Now(),
+ }
+ if u := user.Current(c); u != nil {
+ g.Author = u.String()
+ }
+ key := datastore.NewIncompleteKey(c, "Greeting", guestbookKey(c))
+ if _, err := datastore.Put(c, key, g); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ // Redirect with 303 which causes the subsequent request to use GET.
+ http.Redirect(w, r, "/", http.StatusSeeOther)
+}
diff --git a/vendor/google.golang.org/appengine/demos/guestbook/index.yaml b/vendor/google.golang.org/appengine/demos/guestbook/index.yaml
new file mode 100644
index 000000000..315ffeb0e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/guestbook/index.yaml
@@ -0,0 +1,7 @@
+indexes:
+
+- kind: Greeting
+ ancestor: yes
+ properties:
+ - name: Date
+ direction: desc
diff --git a/vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html b/vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html
new file mode 100644
index 000000000..322b7cf63
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html
@@ -0,0 +1,26 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <title>Guestbook Demo</title>
+  </head>
+  <body>
+    <p>
+    {{with .Email}}You are currently logged in as {{.}}.{{end}}
+    {{with .Login}}<a href="{{.}}">Sign in</a>{{end}}
+    {{with .Logout}}<a href="{{.}}">Sign out</a>{{end}}
+    </p>
+
+    {{range .Greetings }}
+      <p>
+      {{with .Author}}<b>{{.}}</b>{{else}}An anonymous person{{end}}
+      on <em>{{.Date.Format "3:04pm, Mon 2 Jan"}}</em>
+      wrote <blockquote>{{.Content}}</blockquote>
+      </p>
+    {{end}}
+
+    <form action="/sign" method="post">
+      <div><textarea name="content" rows="3" cols="60"></textarea></div>
+      <div><input type="submit" value="Sign Guestbook"></div>
+    </form>
+  </body>
+</html>
diff --git a/vendor/google.golang.org/appengine/demos/helloworld/app.yaml b/vendor/google.golang.org/appengine/demos/helloworld/app.yaml
new file mode 100644
index 000000000..bac034b77
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/helloworld/app.yaml
@@ -0,0 +1,15 @@
+application: helloworld
+version: 1
+runtime: go
+api_version: go1
+vm: true
+
+manual_scaling:
+ instances: 1
+
+handlers:
+- url: /favicon.ico
+ static_files: favicon.ico
+ upload: favicon.ico
+- url: /.*
+ script: _go_app
diff --git a/vendor/google.golang.org/appengine/demos/helloworld/favicon.ico b/vendor/google.golang.org/appengine/demos/helloworld/favicon.ico
new file mode 100644
index 000000000..f19c04d27
Binary files /dev/null and b/vendor/google.golang.org/appengine/demos/helloworld/favicon.ico differ
diff --git a/vendor/google.golang.org/appengine/demos/helloworld/helloworld.go b/vendor/google.golang.org/appengine/demos/helloworld/helloworld.go
new file mode 100644
index 000000000..232492948
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/helloworld/helloworld.go
@@ -0,0 +1,46 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package helloworld
+
+import (
+ "html/template"
+ "net/http"
+ "time"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/log"
+)
+
+var initTime = time.Now()
+
+func init() {
+ http.HandleFunc("/", handle)
+}
+
+func handle(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path != "/" {
+ http.NotFound(w, r)
+ return
+ }
+
+ c := appengine.NewContext(r)
+ log.Infof(c, "Serving the front page.")
+
+ tmpl.Execute(w, time.Since(initTime))
+}
+
+var tmpl = template.Must(template.New("front").Parse(`
+<html><body>
+
+<p>
+Hello, World! 세상아 안녕!
+</p>
+
+<p>
+This instance has been running for <em>{{.}}</em>.
+</p>
+
+</body></html>
+`))
diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go
new file mode 100644
index 000000000..16d0772e2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/errors.go
@@ -0,0 +1,46 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// This file provides error functions for common API failure modes.
+
+package appengine
+
+import (
+ "fmt"
+
+ "google.golang.org/appengine/internal"
+)
+
+// IsOverQuota reports whether err represents an API call failure
+// due to insufficient available quota.
+func IsOverQuota(err error) bool {
+ callErr, ok := err.(*internal.CallError)
+ return ok && callErr.Code == 4
+}
+
+// MultiError is returned by batch operations when there are errors with
+// particular elements. Errors will be in a one-to-one correspondence with
+// the input elements; successful elements will have a nil entry.
+type MultiError []error
+
+func (m MultiError) Error() string {
+ s, n := "", 0
+ for _, e := range m {
+ if e != nil {
+ if n == 0 {
+ s = e.Error()
+ }
+ n++
+ }
+ }
+ switch n {
+ case 0:
+ return "(0 errors)"
+ case 1:
+ return s
+ case 2:
+ return s + " (and 1 other error)"
+ }
+ return fmt.Sprintf("%s (and %d other errors)", s, n-1)
+}
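
As a usage note (not part of the vendored sources), a hedged sketch of how a caller typically unpacks a MultiError from a batch datastore operation; the Greeting type and the key slice are assumptions for illustration.

```go
package sketch

import (
	"golang.org/x/net/context"

	"google.golang.org/appengine"
	"google.golang.org/appengine/datastore"
	"google.golang.org/appengine/log"
)

type Greeting struct {
	Author, Content string
}

// getGreetings fetches a batch and inspects per-element failures. Batch calls
// report partial failures as an appengine.MultiError whose entries line up
// with the input keys; nil entries mark the elements that succeeded.
func getGreetings(c context.Context, keys []*datastore.Key) []Greeting {
	dst := make([]Greeting, len(keys))
	if err := datastore.GetMulti(c, keys, dst); err != nil {
		if me, ok := err.(appengine.MultiError); ok {
			for i, e := range me {
				if e != nil {
					log.Errorf(c, "key %v failed: %v", keys[i], e)
				}
			}
		} else {
			log.Errorf(c, "GetMulti: %v", err)
		}
	}
	return dst
}
```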
diff --git a/vendor/google.golang.org/appengine/file/file.go b/vendor/google.golang.org/appengine/file/file.go
new file mode 100644
index 000000000..c3cd58baf
--- /dev/null
+++ b/vendor/google.golang.org/appengine/file/file.go
@@ -0,0 +1,28 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package file provides helper functions for using Google Cloud Storage.
+package file
+
+import (
+ "fmt"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ aipb "google.golang.org/appengine/internal/app_identity"
+)
+
+// DefaultBucketName returns the name of this application's
+// default Google Cloud Storage bucket.
+func DefaultBucketName(c context.Context) (string, error) {
+ req := &aipb.GetDefaultGcsBucketNameRequest{}
+ res := &aipb.GetDefaultGcsBucketNameResponse{}
+
+ err := internal.Call(c, "app_identity_service", "GetDefaultGcsBucketName", req, res)
+ if err != nil {
+ return "", fmt.Errorf("file: no default bucket name returned in RPC response: %v", res)
+ }
+ return res.GetDefaultGcsBucketName(), nil
+}
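
A small, hypothetical handler (not part of the diff) showing the intended use of DefaultBucketName; the route and response format are assumptions.

```go
package sketch

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/file"
)

// bucketHandler reports the app's default GCS bucket, e.g. "my-app.appspot.com".
func bucketHandler(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	name, err := file.DefaultBucketName(c)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintf(w, "default bucket: %s\n", name)
}
```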
diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go
new file mode 100644
index 000000000..b8dcf8f36
--- /dev/null
+++ b/vendor/google.golang.org/appengine/identity.go
@@ -0,0 +1,142 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+ "time"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/app_identity"
+ modpb "google.golang.org/appengine/internal/modules"
+)
+
+// AppID returns the application ID for the current application.
+// The string will be a plain application ID (e.g. "appid"), with a
+// domain prefix for custom domain deployments (e.g. "example.com:appid").
+func AppID(c context.Context) string { return internal.AppID(c) }
+
+// DefaultVersionHostname returns the standard hostname of the default version
+// of the current application (e.g. "my-app.appspot.com"). This is suitable for
+// use in constructing URLs.
+func DefaultVersionHostname(c context.Context) string {
+ return internal.DefaultVersionHostname(c)
+}
+
+// ModuleName returns the module name of the current instance.
+func ModuleName(c context.Context) string {
+ return internal.ModuleName(c)
+}
+
+// ModuleHostname returns a hostname of a module instance.
+// If module is the empty string, it refers to the module of the current instance.
+// If version is empty, it refers to the version of the current instance if valid,
+// or the default version of the module of the current instance.
+// If instance is empty, ModuleHostname returns the load-balancing hostname.
+func ModuleHostname(c context.Context, module, version, instance string) (string, error) {
+ req := &modpb.GetHostnameRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ if instance != "" {
+ req.Instance = &instance
+ }
+ res := &modpb.GetHostnameResponse{}
+ if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil {
+ return "", err
+ }
+ return *res.Hostname, nil
+}
+
+// VersionID returns the version ID for the current application.
+// It will be of the form "X.Y", where X is specified in app.yaml,
+// and Y is a number generated when each version of the app is uploaded.
+// It does not include a module name.
+func VersionID(c context.Context) string { return internal.VersionID(c) }
+
+// InstanceID returns a mostly-unique identifier for this instance.
+func InstanceID() string { return internal.InstanceID() }
+
+// Datacenter returns an identifier for the datacenter that the instance is running in.
+func Datacenter(c context.Context) string { return internal.Datacenter(c) }
+
+// ServerSoftware returns the App Engine release version.
+// In production, it looks like "Google App Engine/X.Y.Z".
+// In the development appserver, it looks like "Development/X.Y".
+func ServerSoftware() string { return internal.ServerSoftware() }
+
+// RequestID returns a string that uniquely identifies the request.
+func RequestID(c context.Context) string { return internal.RequestID(c) }
+
+// AccessToken generates an OAuth2 access token for the specified scopes on
+// behalf of the service account of this application. This token will expire after
+// the returned time.
+func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) {
+ req := &pb.GetAccessTokenRequest{Scope: scopes}
+ res := &pb.GetAccessTokenResponse{}
+
+ err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res)
+ if err != nil {
+ return "", time.Time{}, err
+ }
+ return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil
+}
+
+// Certificate represents a public certificate for the app.
+type Certificate struct {
+ KeyName string
+ Data []byte // PEM-encoded X.509 certificate
+}
+
+// PublicCertificates retrieves the public certificates for the app.
+// They can be used to verify a signature returned by SignBytes.
+func PublicCertificates(c context.Context) ([]Certificate, error) {
+ req := &pb.GetPublicCertificateForAppRequest{}
+ res := &pb.GetPublicCertificateForAppResponse{}
+ if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil {
+ return nil, err
+ }
+ var cs []Certificate
+ for _, pc := range res.PublicCertificateList {
+ cs = append(cs, Certificate{
+ KeyName: pc.GetKeyName(),
+ Data: []byte(pc.GetX509CertificatePem()),
+ })
+ }
+ return cs, nil
+}
+
+// ServiceAccount returns a string representing the service account name, in
+// the form of an email address (typically app_id@appspot.gserviceaccount.com).
+func ServiceAccount(c context.Context) (string, error) {
+ req := &pb.GetServiceAccountNameRequest{}
+ res := &pb.GetServiceAccountNameResponse{}
+
+ err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res)
+ if err != nil {
+ return "", err
+ }
+ return res.GetServiceAccountName(), err
+}
+
+// SignBytes signs bytes using a private key unique to your application.
+func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) {
+ req := &pb.SignForAppRequest{BytesToSign: bytes}
+ res := &pb.SignForAppResponse{}
+
+ if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil {
+ return "", nil, err
+ }
+ return res.GetKeyName(), res.GetSignatureBytes(), nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name)
+ internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name)
+}
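
For reference, a hedged sketch (not in the vendored sources) that exercises a few of the identity helpers above from an ordinary handler; the Cloud Storage read-only scope is just an illustrative choice.

```go
package sketch

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
)

// whoAmI prints basic identity values and mints a short-lived OAuth2 token
// for the app's service account.
func whoAmI(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	fmt.Fprintf(w, "app=%s version=%s instance=%s\n",
		appengine.AppID(c), appengine.VersionID(c), appengine.InstanceID())

	tok, expiry, err := appengine.AccessToken(c, "https://www.googleapis.com/auth/devstorage.read_only")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintf(w, "token of length %d valid until %s\n", len(tok), expiry)
}
```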
diff --git a/vendor/google.golang.org/appengine/image/image.go b/vendor/google.golang.org/appengine/image/image.go
new file mode 100644
index 000000000..780d53e17
--- /dev/null
+++ b/vendor/google.golang.org/appengine/image/image.go
@@ -0,0 +1,67 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package image provides image services.
+package image
+
+import (
+ "fmt"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/image"
+)
+
+type ServingURLOptions struct {
+ Secure bool // whether the URL should use HTTPS
+
+ // Size must be between zero and 1600.
+ // If Size is non-zero, a resized version of the image is served,
+ // and Size is the served image's longest dimension. The aspect ratio is preserved.
+ // If Crop is true the image is cropped from the center instead of being resized.
+ Size int
+ Crop bool
+}
+
+// ServingURL returns a URL that will serve an image from Blobstore.
+func ServingURL(c context.Context, key appengine.BlobKey, opts *ServingURLOptions) (*url.URL, error) {
+ req := &pb.ImagesGetUrlBaseRequest{
+ BlobKey: (*string)(&key),
+ }
+ if opts != nil && opts.Secure {
+ req.CreateSecureUrl = &opts.Secure
+ }
+ res := &pb.ImagesGetUrlBaseResponse{}
+ if err := internal.Call(c, "images", "GetUrlBase", req, res); err != nil {
+ return nil, err
+ }
+
+ // The URL may have suffixes added to dynamically resize or crop:
+ // - adding "=s32" will serve the image resized to 32 pixels, preserving the aspect ratio.
+ // - adding "=s32-c" is the same as "=s32" except it will be cropped.
+ u := *res.Url
+ if opts != nil && opts.Size > 0 {
+ u += fmt.Sprintf("=s%d", opts.Size)
+ if opts.Crop {
+ u += "-c"
+ }
+ }
+ return url.Parse(u)
+}
+
+// DeleteServingURL deletes the serving URL for an image.
+func DeleteServingURL(c context.Context, key appengine.BlobKey) error {
+ req := &pb.ImagesDeleteUrlBaseRequest{
+ BlobKey: (*string)(&key),
+ }
+ res := &pb.ImagesDeleteUrlBaseResponse{}
+ return internal.Call(c, "images", "DeleteUrlBase", req, res)
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("images", pb.ImagesServiceError_ErrorCode_name)
+}
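
A hypothetical handler (not part of the diff) illustrating ServingURL with the options documented above; where the blob key comes from (here a form value) is an assumption.

```go
package sketch

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/image"
)

// thumbnail returns a 200px, center-cropped, HTTPS serving URL for a
// Blobstore image identified by a blob key supplied by the client.
func thumbnail(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	key := appengine.BlobKey(r.FormValue("blobkey")) // illustrative source of the key
	u, err := image.ServingURL(c, key, &image.ServingURLOptions{
		Secure: true,
		Size:   200,
		Crop:   true,
	})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintln(w, u)
}
```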
diff --git a/vendor/google.golang.org/appengine/internal/aetesting/fake.go b/vendor/google.golang.org/appengine/internal/aetesting/fake.go
new file mode 100644
index 000000000..6f5c1977f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/aetesting/fake.go
@@ -0,0 +1,80 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package aetesting provides utilities for testing App Engine packages.
+// This is not for testing user applications.
+package aetesting
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// FakeSingleContext returns a context whose Call invocations will be serviced
+// by f, which should be a function that takes two arguments (the input and output
+// protocol buffer types) and returns a single error.
+func FakeSingleContext(t *testing.T, service, method string, f interface{}) context.Context {
+ fv := reflect.ValueOf(f)
+ if fv.Kind() != reflect.Func {
+ t.Fatal("not a function")
+ }
+ ft := fv.Type()
+ if ft.NumIn() != 2 || ft.NumOut() != 1 {
+ t.Fatalf("f has %d in and %d out, want 2 in and 1 out", ft.NumIn(), ft.NumOut())
+ }
+ for i := 0; i < 2; i++ {
+ at := ft.In(i)
+ if !at.Implements(protoMessageType) {
+ t.Fatalf("arg %d does not implement proto.Message", i)
+ }
+ }
+ if ft.Out(0) != errorType {
+ t.Fatalf("f's return is %v, want error", ft.Out(0))
+ }
+ s := &single{
+ t: t,
+ service: service,
+ method: method,
+ f: fv,
+ }
+ return internal.WithCallOverride(context.Background(), s.call)
+}
+
+var (
+ protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
+ errorType = reflect.TypeOf((*error)(nil)).Elem()
+)
+
+type single struct {
+ t *testing.T
+ service, method string
+ f reflect.Value
+}
+
+func (s *single) call(ctx context.Context, service, method string, in, out proto.Message) error {
+ if service == "__go__" {
+ if method == "GetNamespace" {
+ return nil // always yield an empty namespace
+ }
+ return fmt.Errorf("Unknown API call /%s.%s", service, method)
+ }
+ if service != s.service || method != s.method {
+ s.t.Fatalf("Unexpected call to /%s.%s", service, method)
+ }
+ ins := []reflect.Value{
+ reflect.ValueOf(in),
+ reflect.ValueOf(out),
+ }
+ outs := s.f.Call(ins)
+ if outs[0].IsNil() {
+ return nil
+ }
+ return outs[0].Interface().(error)
+}
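
To show the shape of f that FakeSingleContext expects, here is a hedged, in-tree test sketch (it must live under google.golang.org/appengine for the internal imports to satisfy Go's internal-package rule); the service-account value is made up.

```go
package appengine_test

import (
	"testing"

	"google.golang.org/appengine"
	"google.golang.org/appengine/internal/aetesting"
	pb "google.golang.org/appengine/internal/app_identity"
)

// TestServiceAccount fakes the app_identity_service.GetServiceAccountName RPC:
// the fake receives the request proto and fills in the response proto.
func TestServiceAccount(t *testing.T) {
	c := aetesting.FakeSingleContext(t, "app_identity_service", "GetServiceAccountName",
		func(req *pb.GetServiceAccountNameRequest, res *pb.GetServiceAccountNameResponse) error {
			name := "test@example.gserviceaccount.com"
			res.ServiceAccountName = &name
			return nil
		})

	got, err := appengine.ServiceAccount(c)
	if err != nil {
		t.Fatalf("ServiceAccount: %v", err)
	}
	if want := "test@example.gserviceaccount.com"; got != want {
		t.Errorf("ServiceAccount = %q, want %q", got, want)
	}
}
```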
diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
new file mode 100644
index 000000000..2c7dc93b3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api.go
@@ -0,0 +1,586 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+ logpb "google.golang.org/appengine/internal/log"
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+const (
+ apiPath = "/rpc_http"
+)
+
+var (
+ // Incoming headers.
+ ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
+ dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
+ curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
+ userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
+ remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
+
+ // Outgoing headers.
+ apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
+ apiEndpointHeaderValue = []string{"app-engine-apis"}
+ apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
+ apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
+ apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
+ apiContentType = http.CanonicalHeaderKey("Content-Type")
+ apiContentTypeValue = []string{"application/octet-stream"}
+ logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
+
+ apiHTTPClient = &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: limitDial,
+ },
+ }
+)
+
+func apiHost() string {
+ host, port := "appengine.googleapis.com", "10001"
+ if h := os.Getenv("API_HOST"); h != "" {
+ host = h
+ }
+ if p := os.Getenv("API_PORT"); p != "" {
+ port = p
+ }
+ return host + ":" + port
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+ c := &context{
+ req: r,
+ outHeader: w.Header(),
+ }
+ stopFlushing := make(chan int)
+
+ ctxs.Lock()
+ ctxs.m[r] = c
+ ctxs.Unlock()
+ defer func() {
+ ctxs.Lock()
+ delete(ctxs.m, r)
+ ctxs.Unlock()
+ }()
+
+ // Patch up RemoteAddr so it looks reasonable.
+ if addr := r.Header.Get(userIPHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else {
+ // Should not normally reach here, but pick a sensible default anyway.
+ r.RemoteAddr = "127.0.0.1"
+ }
+ // The address in the headers will most likely be of these forms:
+ // 123.123.123.123
+ // 2001:db8::1
+ // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
+ if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
+ // Assume the remote address is only a host; add a default port.
+ r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
+ }
+
+ // Start goroutine responsible for flushing app logs.
+ // This is done after adding c to ctx.m (and stopped before removing it)
+ // because flushing logs requires making an API call.
+ go c.logFlusher(stopFlushing)
+
+ executeRequestSafely(c, r)
+ c.outHeader = nil // make sure header changes aren't respected any more
+
+ stopFlushing <- 1 // any logging beyond this point will be dropped
+
+ // Flush any pending logs asynchronously.
+ c.pendingLogs.Lock()
+ flushes := c.pendingLogs.flushes
+ if len(c.pendingLogs.lines) > 0 {
+ flushes++
+ }
+ c.pendingLogs.Unlock()
+ go c.flushLog(false)
+ w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
+
+ // Avoid nil Write call if c.Write is never called.
+ if c.outCode != 0 {
+ w.WriteHeader(c.outCode)
+ }
+ if c.outBody != nil {
+ w.Write(c.outBody)
+ }
+}
+
+func executeRequestSafely(c *context, r *http.Request) {
+ defer func() {
+ if x := recover(); x != nil {
+ logf(c, 4, "%s", renderPanic(x)) // 4 == critical
+ c.outCode = 500
+ }
+ }()
+
+ http.DefaultServeMux.ServeHTTP(c, r)
+}
+
+func renderPanic(x interface{}) string {
+ buf := make([]byte, 16<<10) // 16 KB should be plenty
+ buf = buf[:runtime.Stack(buf, false)]
+
+ // Remove the first few stack frames:
+ // this func
+ // the recover closure in the caller
+ // That will root the stack trace at the site of the panic.
+ const (
+ skipStart = "internal.renderPanic"
+ skipFrames = 2
+ )
+ start := bytes.Index(buf, []byte(skipStart))
+ p := start
+ for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
+ p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
+ if p < 0 {
+ break
+ }
+ }
+ if p >= 0 {
+ // buf[start:p+1] is the block to remove.
+ // Copy buf[p+1:] over buf[start:] and shrink buf.
+ copy(buf[start:], buf[p+1:])
+ buf = buf[:len(buf)-(p+1-start)]
+ }
+
+ // Add panic heading.
+ head := fmt.Sprintf("panic: %v\n\n", x)
+ if len(head) > len(buf) {
+ // Extremely unlikely to happen.
+ return head
+ }
+ copy(buf[len(head):], buf)
+ copy(buf, head)
+
+ return string(buf)
+}
+
+var ctxs = struct {
+ sync.Mutex
+ m map[*http.Request]*context
+ bg *context // background context, lazily initialized
+}{
+ m: make(map[*http.Request]*context),
+}
+
+// context represents the context of an in-flight HTTP request.
+// It implements the appengine.Context and http.ResponseWriter interfaces.
+type context struct {
+ req *http.Request
+
+ outCode int
+ outHeader http.Header
+ outBody []byte
+
+ pendingLogs struct {
+ sync.Mutex
+ lines []*logpb.UserAppLogLine
+ flushes int
+ }
+}
+
+var contextKey = "holds a *context"
+
+func fromContext(ctx netcontext.Context) *context {
+ c, _ := ctx.Value(&contextKey).(*context)
+ return c
+}
+
+func withContext(parent netcontext.Context, c *context) netcontext.Context {
+ ctx := netcontext.WithValue(parent, &contextKey, c)
+ if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
+ ctx = WithNamespace(ctx, ns)
+ }
+ return ctx
+}
+
+func toContext(c *context) netcontext.Context {
+ return withContext(netcontext.Background(), c)
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+ if c := fromContext(ctx); c != nil {
+ return c.req.Header
+ }
+ return nil
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+ ctxs.Lock()
+ c := ctxs.m[req]
+ ctxs.Unlock()
+
+ if c == nil {
+ // Someone passed in an http.Request that is not in-flight.
+ // We panic here rather than panicking at a later point
+ // so that stack traces will be more sensible.
+ log.Panic("appengine: NewContext passed an unknown http.Request")
+ }
+ return withContext(parent, c)
+}
+
+func BackgroundContext() netcontext.Context {
+ ctxs.Lock()
+ defer ctxs.Unlock()
+
+ if ctxs.bg != nil {
+ return toContext(ctxs.bg)
+ }
+
+ // Compute background security ticket.
+ appID := partitionlessAppID()
+ escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
+ majVersion := VersionID(nil)
+ if i := strings.Index(majVersion, "_"); i >= 0 {
+ majVersion = majVersion[:i]
+ }
+ ticket := fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
+
+ ctxs.bg = &context{
+ req: &http.Request{
+ Header: http.Header{
+ ticketHeader: []string{ticket},
+ },
+ },
+ }
+
+ // TODO(dsymonds): Wire up the shutdown handler to do a final flush.
+ go ctxs.bg.logFlusher(make(chan int))
+
+ return toContext(ctxs.bg)
+}
+
+var errTimeout = &CallError{
+ Detail: "Deadline exceeded",
+ Code: int32(remotepb.RpcError_CANCELLED),
+ Timeout: true,
+}
+
+func (c *context) Header() http.Header { return c.outHeader }
+
+// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
+// codes do not permit a response body (nor response entity headers such as
+// Content-Length, Content-Type, etc).
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+func (c *context) Write(b []byte) (int, error) {
+ if c.outCode == 0 {
+ c.WriteHeader(http.StatusOK)
+ }
+ if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
+ return 0, http.ErrBodyNotAllowed
+ }
+ c.outBody = append(c.outBody, b...)
+ return len(b), nil
+}
+
+func (c *context) WriteHeader(code int) {
+ if c.outCode != 0 {
+ logf(c, 3, "WriteHeader called multiple times on request.") // error level
+ return
+ }
+ c.outCode = code
+}
+
+func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
+ dst := apiHost()
+ hreq := &http.Request{
+ Method: "POST",
+ URL: &url.URL{
+ Scheme: "http",
+ Host: dst,
+ Path: apiPath,
+ },
+ Header: http.Header{
+ apiEndpointHeader: apiEndpointHeaderValue,
+ apiMethodHeader: apiMethodHeaderValue,
+ apiContentType: apiContentTypeValue,
+ apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
+ },
+ Body: ioutil.NopCloser(bytes.NewReader(body)),
+ ContentLength: int64(len(body)),
+ Host: dst,
+ }
+ if info := c.req.Header.Get(dapperHeader); info != "" {
+ hreq.Header.Set(dapperHeader, info)
+ }
+
+ tr := apiHTTPClient.Transport.(*http.Transport)
+
+ var timedOut int32 // atomic; set to 1 if timed out
+ t := time.AfterFunc(timeout, func() {
+ atomic.StoreInt32(&timedOut, 1)
+ tr.CancelRequest(hreq)
+ })
+ defer t.Stop()
+ defer func() {
+ // Check if timeout was exceeded.
+ if atomic.LoadInt32(&timedOut) != 0 {
+ err = errTimeout
+ }
+ }()
+
+ hresp, err := apiHTTPClient.Do(hreq)
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ defer hresp.Body.Close()
+ hrespBody, err := ioutil.ReadAll(hresp.Body)
+ if hresp.StatusCode != 200 {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge response bad: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return hrespBody, nil
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+ if f, ok := ctx.Value(&callOverrideKey).(callOverrideFunc); ok {
+ return f(ctx, service, method, in, out)
+ }
+
+ c := fromContext(ctx)
+ if c == nil {
+ // Give a good error message rather than a panic lower down.
+ return errors.New("not an App Engine context")
+ }
+
+ // Apply transaction modifications if we're in a transaction.
+ if t := transactionFromContext(ctx); t != nil {
+ if t.finished {
+ return errors.New("transaction context has expired")
+ }
+ applyTransaction(in, &t.transaction)
+ }
+
+ // Default RPC timeout is 60s.
+ timeout := 60 * time.Second
+ if deadline, ok := ctx.Deadline(); ok {
+ timeout = deadline.Sub(time.Now())
+ }
+
+ data, err := proto.Marshal(in)
+ if err != nil {
+ return err
+ }
+
+ ticket := c.req.Header.Get(ticketHeader)
+ req := &remotepb.Request{
+ ServiceName: &service,
+ Method: &method,
+ Request: data,
+ RequestId: &ticket,
+ }
+ hreqBody, err := proto.Marshal(req)
+ if err != nil {
+ return err
+ }
+
+ hrespBody, err := c.post(hreqBody, timeout)
+ if err != nil {
+ return err
+ }
+
+ res := &remotepb.Response{}
+ if err := proto.Unmarshal(hrespBody, res); err != nil {
+ return err
+ }
+ if res.RpcError != nil {
+ ce := &CallError{
+ Detail: res.RpcError.GetDetail(),
+ Code: *res.RpcError.Code,
+ }
+ switch remotepb.RpcError_ErrorCode(ce.Code) {
+ case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
+ ce.Timeout = true
+ }
+ return ce
+ }
+ if res.ApplicationError != nil {
+ return &APIError{
+ Service: *req.ServiceName,
+ Detail: res.ApplicationError.GetDetail(),
+ Code: *res.ApplicationError.Code,
+ }
+ }
+ if res.Exception != nil || res.JavaException != nil {
+ // This shouldn't happen, but let's be defensive.
+ return &CallError{
+ Detail: "service bridge returned exception",
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return proto.Unmarshal(res.Response, out)
+}
+
+func (c *context) Request() *http.Request {
+ return c.req
+}
+
+func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
+ // Truncate long log lines.
+ // TODO(dsymonds): Check if this is still necessary.
+ const lim = 8 << 10
+ if len(*ll.Message) > lim {
+ suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
+ ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
+ }
+
+ c.pendingLogs.Lock()
+ c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
+ c.pendingLogs.Unlock()
+}
+
+var logLevelName = map[int64]string{
+ 0: "DEBUG",
+ 1: "INFO",
+ 2: "WARNING",
+ 3: "ERROR",
+ 4: "CRITICAL",
+}
+
+func logf(c *context, level int64, format string, args ...interface{}) {
+ s := fmt.Sprintf(format, args...)
+ s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
+ c.addLogLine(&logpb.UserAppLogLine{
+ TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
+ Level: &level,
+ Message: &s,
+ })
+ log.Print(logLevelName[level] + ": " + s)
+}
+
+// flushLog attempts to flush any pending logs to the appserver.
+// It should not be called concurrently.
+func (c *context) flushLog(force bool) (flushed bool) {
+ c.pendingLogs.Lock()
+ // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
+ n, rem := 0, 30<<20
+ for ; n < len(c.pendingLogs.lines); n++ {
+ ll := c.pendingLogs.lines[n]
+ // Each log line will require about 3 bytes of overhead.
+ nb := proto.Size(ll) + 3
+ if nb > rem {
+ break
+ }
+ rem -= nb
+ }
+ lines := c.pendingLogs.lines[:n]
+ c.pendingLogs.lines = c.pendingLogs.lines[n:]
+ c.pendingLogs.Unlock()
+
+ if len(lines) == 0 && !force {
+ // Nothing to flush.
+ return false
+ }
+
+ rescueLogs := false
+ defer func() {
+ if rescueLogs {
+ c.pendingLogs.Lock()
+ c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
+ c.pendingLogs.Unlock()
+ }
+ }()
+
+ buf, err := proto.Marshal(&logpb.UserAppLogGroup{
+ LogLine: lines,
+ })
+ if err != nil {
+ log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
+ rescueLogs = true
+ return false
+ }
+
+ req := &logpb.FlushRequest{
+ Logs: buf,
+ }
+ res := &basepb.VoidProto{}
+ c.pendingLogs.Lock()
+ c.pendingLogs.flushes++
+ c.pendingLogs.Unlock()
+ if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
+ log.Printf("internal.flushLog: Flush RPC: %v", err)
+ rescueLogs = true
+ return false
+ }
+ return true
+}
+
+const (
+ // Log flushing parameters.
+ flushInterval = 1 * time.Second
+ forceFlushInterval = 60 * time.Second
+)
+
+func (c *context) logFlusher(stop <-chan int) {
+ lastFlush := time.Now()
+ tick := time.NewTicker(flushInterval)
+ for {
+ select {
+ case <-stop:
+ // Request finished.
+ tick.Stop()
+ return
+ case <-tick.C:
+ force := time.Now().Sub(lastFlush) > forceFlushInterval
+ if c.flushLog(force) {
+ lastFlush = time.Now()
+ }
+ }
+ }
+}
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+ return toContext(&context{req: req})
+}
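
As a usage sketch (not part of the vendored code), this shows how a context deadline set by application code flows into Call above: the RPC timeout becomes the remaining deadline instead of the 60-second default, and an overrun surfaces as a CallError with Timeout set. The query kind and limit are assumptions.

```go
package sketch

import (
	"net/http"
	"time"

	"golang.org/x/net/context"

	"google.golang.org/appengine"
	"google.golang.org/appengine/datastore"
	"google.golang.org/appengine/log"
)

// listKeys runs a keys-only query under a 5-second deadline; the deadline is
// picked up by the internal Call path via ctx.Deadline().
func listKeys(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	c, cancel := context.WithTimeout(c, 5*time.Second)
	defer cancel()

	keys, err := datastore.NewQuery("Greeting").KeysOnly().Limit(100).GetAll(c, nil)
	if err != nil {
		log.Errorf(c, "query failed (possibly timed out): %v", err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	log.Infof(c, "fetched %d keys", len(keys))
	w.WriteHeader(http.StatusNoContent)
}
```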
diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go
new file mode 100644
index 000000000..78d1f14cb
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_classic.go
@@ -0,0 +1,98 @@
+// +build appengine
+
+package internal
+
+import (
+ "errors"
+ "net/http"
+ "time"
+
+ "appengine"
+ "appengine_internal"
+ basepb "appengine_internal/base"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+)
+
+var contextKey = "holds an appengine.Context"
+
+func fromContext(ctx netcontext.Context) appengine.Context {
+ c, _ := ctx.Value(&contextKey).(appengine.Context)
+ return c
+}
+
+// This is only for classic App Engine adapters.
+func ClassicContextFromContext(ctx netcontext.Context) appengine.Context {
+ return fromContext(ctx)
+}
+
+func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
+ ctx := netcontext.WithValue(parent, &contextKey, c)
+
+ s := &basepb.StringProto{}
+ c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
+ if ns := s.GetValue(); ns != "" {
+ ctx = WithNamespace(ctx, ns)
+ }
+
+ return ctx
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+ c := appengine.NewContext(req)
+ return withContext(parent, c)
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+ if f, ok := ctx.Value(&callOverrideKey).(callOverrideFunc); ok {
+ return f(ctx, service, method, in, out)
+ }
+
+ c := fromContext(ctx)
+ if c == nil {
+ // Give a good error message rather than a panic lower down.
+ return errors.New("not an App Engine context")
+ }
+
+ // Apply transaction modifications if we're in a transaction.
+ if t := transactionFromContext(ctx); t != nil {
+ if t.finished {
+ return errors.New("transaction context has expired")
+ }
+ applyTransaction(in, &t.transaction)
+ }
+
+ var opts *appengine_internal.CallOptions
+ if d, ok := ctx.Deadline(); ok {
+ opts = &appengine_internal.CallOptions{
+ Timeout: d.Sub(time.Now()),
+ }
+ }
+
+ return c.Call(service, method, in, out, opts)
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+ panic("handleHTTP called; this should be impossible")
+}
+
+func logf(c appengine.Context, level int64, format string, args ...interface{}) {
+ var fn func(format string, args ...interface{})
+ switch level {
+ case 0:
+ fn = c.Debugf
+ case 1:
+ fn = c.Infof
+ case 2:
+ fn = c.Warningf
+ case 3:
+ fn = c.Errorf
+ case 4:
+ fn = c.Criticalf
+ default:
+ // This shouldn't happen.
+ fn = c.Criticalf
+ }
+ fn(format, args...)
+}
diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go
new file mode 100644
index 000000000..c499124d8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_common.go
@@ -0,0 +1,58 @@
+package internal
+
+import (
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+)
+
+type callOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error
+
+var callOverrideKey = "holds a callOverrideFunc"
+
+func WithCallOverride(ctx netcontext.Context, f callOverrideFunc) netcontext.Context {
+ return netcontext.WithValue(ctx, &callOverrideKey, f)
+}
+
+type logOverrideFunc func(level int64, format string, args ...interface{})
+
+var logOverrideKey = "holds a logOverrideFunc"
+
+func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
+ return netcontext.WithValue(ctx, &logOverrideKey, f)
+}
+
+var appIDOverrideKey = "holds a string, being the full app ID"
+
+func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
+ return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
+}
+
+var namespaceKey = "holds the namespace string"
+
+func WithNamespace(ctx netcontext.Context, ns string) netcontext.Context {
+ return netcontext.WithValue(ctx, &namespaceKey, ns)
+}
+
+func NamespaceFromContext(ctx netcontext.Context) string {
+ // If there's no namespace, return the empty string.
+ ns, _ := ctx.Value(&namespaceKey).(string)
+ return ns
+}
+
+// FullyQualifiedAppID returns the fully-qualified application ID.
+// This may contain a partition prefix (e.g. "s~" for High Replication apps),
+// or a domain prefix (e.g. "example.com:").
+func FullyQualifiedAppID(ctx netcontext.Context) string {
+ if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
+ return id
+ }
+ return fullyQualifiedAppID(ctx)
+}
+
+func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
+ if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
+ f(level, format, args...)
+ return
+ }
+ logf(fromContext(ctx), level, format, args...)
+}
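
A brief sketch (not in the vendored sources) of the namespace helpers defined above; because of Go's internal-package rule it must live under google.golang.org/appengine/..., and the "staging" namespace is illustrative.

```go
package sketch

import (
	"fmt"

	netcontext "golang.org/x/net/context"

	"google.golang.org/appengine/internal"
)

// namespaceDemo shows that WithNamespace stores the value on the context and
// NamespaceFromContext reads it back, defaulting to the empty string.
func namespaceDemo() {
	ctx := netcontext.Background()
	fmt.Printf("%q\n", internal.NamespaceFromContext(ctx)) // ""

	ctx = internal.WithNamespace(ctx, "staging")
	fmt.Printf("%q\n", internal.NamespaceFromContext(ctx)) // "staging"
}
```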
diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go
new file mode 100644
index 000000000..11df8c07b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_id.go
@@ -0,0 +1,28 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "strings"
+)
+
+func parseFullAppID(appid string) (partition, domain, displayID string) {
+ if i := strings.Index(appid, "~"); i != -1 {
+ partition, appid = appid[:i], appid[i+1:]
+ }
+ if i := strings.Index(appid, ":"); i != -1 {
+ domain, appid = appid[:i], appid[i+1:]
+ }
+ return partition, domain, appid
+}
+
+// appID returns "appid" or "domain.com:appid".
+func appID(fullAppID string) string {
+ _, dom, dis := parseFullAppID(fullAppID)
+ if dom != "" {
+ return dom + ":" + dis
+ }
+ return dis
+}
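
A small in-tree test sketch (not part of the vendored files) spelling out the decomposition these helpers perform on a fully-qualified ID; the sample ID "s~example.com:myapp" is made up.

```go
package internal

import "testing"

// TestParseFullAppID checks the partition/domain/display split and the
// appID rendering for a fully-qualified application ID.
func TestParseFullAppID(t *testing.T) {
	part, dom, dis := parseFullAppID("s~example.com:myapp")
	if part != "s" || dom != "example.com" || dis != "myapp" {
		t.Errorf(`parseFullAppID = (%q, %q, %q), want ("s", "example.com", "myapp")`, part, dom, dis)
	}
	if got := appID("s~example.com:myapp"); got != "example.com:myapp" {
		t.Errorf(`appID = %q, want "example.com:myapp"`, got)
	}
}
```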
diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
new file mode 100644
index 000000000..532dad773
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
@@ -0,0 +1,294 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto
+// DO NOT EDIT!
+
+/*
+Package app_identity is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/app_identity/app_identity_service.proto
+
+It has these top-level messages:
+ AppIdentityServiceError
+ SignForAppRequest
+ SignForAppResponse
+ GetPublicCertificateForAppRequest
+ PublicCertificate
+ GetPublicCertificateForAppResponse
+ GetServiceAccountNameRequest
+ GetServiceAccountNameResponse
+ GetAccessTokenRequest
+ GetAccessTokenResponse
+ GetDefaultGcsBucketNameRequest
+ GetDefaultGcsBucketNameResponse
+*/
+package app_identity
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type AppIdentityServiceError_ErrorCode int32
+
+const (
+ AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0
+ AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9
+ AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000
+ AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001
+ AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002
+ AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003
+ AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005
+ AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006
+)
+
+var AppIdentityServiceError_ErrorCode_name = map[int32]string{
+ 0: "SUCCESS",
+ 9: "UNKNOWN_SCOPE",
+ 1000: "BLOB_TOO_LARGE",
+ 1001: "DEADLINE_EXCEEDED",
+ 1002: "NOT_A_VALID_APP",
+ 1003: "UNKNOWN_ERROR",
+ 1005: "NOT_ALLOWED",
+ 1006: "NOT_IMPLEMENTED",
+}
+var AppIdentityServiceError_ErrorCode_value = map[string]int32{
+ "SUCCESS": 0,
+ "UNKNOWN_SCOPE": 9,
+ "BLOB_TOO_LARGE": 1000,
+ "DEADLINE_EXCEEDED": 1001,
+ "NOT_A_VALID_APP": 1002,
+ "UNKNOWN_ERROR": 1003,
+ "NOT_ALLOWED": 1005,
+ "NOT_IMPLEMENTED": 1006,
+}
+
+func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode {
+ p := new(AppIdentityServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x AppIdentityServiceError_ErrorCode) String() string {
+ return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x))
+}
+func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = AppIdentityServiceError_ErrorCode(value)
+ return nil
+}
+
+type AppIdentityServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} }
+func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) }
+func (*AppIdentityServiceError) ProtoMessage() {}
+
+type SignForAppRequest struct {
+ BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} }
+func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) }
+func (*SignForAppRequest) ProtoMessage() {}
+
+func (m *SignForAppRequest) GetBytesToSign() []byte {
+ if m != nil {
+ return m.BytesToSign
+ }
+ return nil
+}
+
+type SignForAppResponse struct {
+ KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"`
+ SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} }
+func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) }
+func (*SignForAppResponse) ProtoMessage() {}
+
+func (m *SignForAppResponse) GetKeyName() string {
+ if m != nil && m.KeyName != nil {
+ return *m.KeyName
+ }
+ return ""
+}
+
+func (m *SignForAppResponse) GetSignatureBytes() []byte {
+ if m != nil {
+ return m.SignatureBytes
+ }
+ return nil
+}
+
+type GetPublicCertificateForAppRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} }
+func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) }
+func (*GetPublicCertificateForAppRequest) ProtoMessage() {}
+
+type PublicCertificate struct {
+ KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"`
+ X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PublicCertificate) Reset() { *m = PublicCertificate{} }
+func (m *PublicCertificate) String() string { return proto.CompactTextString(m) }
+func (*PublicCertificate) ProtoMessage() {}
+
+func (m *PublicCertificate) GetKeyName() string {
+ if m != nil && m.KeyName != nil {
+ return *m.KeyName
+ }
+ return ""
+}
+
+func (m *PublicCertificate) GetX509CertificatePem() string {
+ if m != nil && m.X509CertificatePem != nil {
+ return *m.X509CertificatePem
+ }
+ return ""
+}
+
+type GetPublicCertificateForAppResponse struct {
+ PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"`
+ MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} }
+func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) }
+func (*GetPublicCertificateForAppResponse) ProtoMessage() {}
+
+func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate {
+ if m != nil {
+ return m.PublicCertificateList
+ }
+ return nil
+}
+
+func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 {
+ if m != nil && m.MaxClientCacheTimeInSecond != nil {
+ return *m.MaxClientCacheTimeInSecond
+ }
+ return 0
+}
+
+type GetServiceAccountNameRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} }
+func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetServiceAccountNameRequest) ProtoMessage() {}
+
+type GetServiceAccountNameResponse struct {
+ ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" json:"service_account_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} }
+func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetServiceAccountNameResponse) ProtoMessage() {}
+
+func (m *GetServiceAccountNameResponse) GetServiceAccountName() string {
+ if m != nil && m.ServiceAccountName != nil {
+ return *m.ServiceAccountName
+ }
+ return ""
+}
+
+type GetAccessTokenRequest struct {
+ Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"`
+ ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"`
+ ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} }
+func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) }
+func (*GetAccessTokenRequest) ProtoMessage() {}
+
+func (m *GetAccessTokenRequest) GetScope() []string {
+ if m != nil {
+ return m.Scope
+ }
+ return nil
+}
+
+func (m *GetAccessTokenRequest) GetServiceAccountId() int64 {
+ if m != nil && m.ServiceAccountId != nil {
+ return *m.ServiceAccountId
+ }
+ return 0
+}
+
+func (m *GetAccessTokenRequest) GetServiceAccountName() string {
+ if m != nil && m.ServiceAccountName != nil {
+ return *m.ServiceAccountName
+ }
+ return ""
+}
+
+type GetAccessTokenResponse struct {
+ AccessToken *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"`
+ ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} }
+func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) }
+func (*GetAccessTokenResponse) ProtoMessage() {}
+
+func (m *GetAccessTokenResponse) GetAccessToken() string {
+ if m != nil && m.AccessToken != nil {
+ return *m.AccessToken
+ }
+ return ""
+}
+
+func (m *GetAccessTokenResponse) GetExpirationTime() int64 {
+ if m != nil && m.ExpirationTime != nil {
+ return *m.ExpirationTime
+ }
+ return 0
+}
+
+type GetDefaultGcsBucketNameRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} }
+func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {}
+
+type GetDefaultGcsBucketNameResponse struct {
+ DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} }
+func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {}
+
+func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string {
+ if m != nil && m.DefaultGcsBucketName != nil {
+ return *m.DefaultGcsBucketName
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
new file mode 100644
index 000000000..19610ca5b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
@@ -0,0 +1,64 @@
+syntax = "proto2";
+option go_package = "app_identity";
+
+package appengine;
+
+message AppIdentityServiceError {
+ enum ErrorCode {
+ SUCCESS = 0;
+ UNKNOWN_SCOPE = 9;
+ BLOB_TOO_LARGE = 1000;
+ DEADLINE_EXCEEDED = 1001;
+ NOT_A_VALID_APP = 1002;
+ UNKNOWN_ERROR = 1003;
+ NOT_ALLOWED = 1005;
+ NOT_IMPLEMENTED = 1006;
+ }
+}
+
+message SignForAppRequest {
+ optional bytes bytes_to_sign = 1;
+}
+
+message SignForAppResponse {
+ optional string key_name = 1;
+ optional bytes signature_bytes = 2;
+}
+
+message GetPublicCertificateForAppRequest {
+}
+
+message PublicCertificate {
+ optional string key_name = 1;
+ optional string x509_certificate_pem = 2;
+}
+
+message GetPublicCertificateForAppResponse {
+ repeated PublicCertificate public_certificate_list = 1;
+ optional int64 max_client_cache_time_in_second = 2;
+}
+
+message GetServiceAccountNameRequest {
+}
+
+message GetServiceAccountNameResponse {
+ optional string service_account_name = 1;
+}
+
+message GetAccessTokenRequest {
+ repeated string scope = 1;
+ optional int64 service_account_id = 2;
+ optional string service_account_name = 3;
+}
+
+message GetAccessTokenResponse {
+ optional string access_token = 1;
+ optional int64 expiration_time = 2;
+}
+
+message GetDefaultGcsBucketNameRequest {
+}
+
+message GetDefaultGcsBucketNameResponse {
+ optional string default_gcs_bucket_name = 1;
+}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
new file mode 100644
index 000000000..9ecc29b63
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
@@ -0,0 +1,134 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/base/api_base.proto
+// DO NOT EDIT!
+
+/*
+Package base is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/base/api_base.proto
+
+It has these top-level messages:
+ StringProto
+ Integer32Proto
+ Integer64Proto
+ BoolProto
+ DoubleProto
+ BytesProto
+ VoidProto
+*/
+package base
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type StringProto struct {
+ Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StringProto) Reset() { *m = StringProto{} }
+func (m *StringProto) String() string { return proto.CompactTextString(m) }
+func (*StringProto) ProtoMessage() {}
+
+func (m *StringProto) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Integer32Proto struct {
+ Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Integer32Proto) Reset() { *m = Integer32Proto{} }
+func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer32Proto) ProtoMessage() {}
+
+func (m *Integer32Proto) GetValue() int32 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Integer64Proto struct {
+ Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Integer64Proto) Reset() { *m = Integer64Proto{} }
+func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer64Proto) ProtoMessage() {}
+
+func (m *Integer64Proto) GetValue() int64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type BoolProto struct {
+ Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BoolProto) Reset() { *m = BoolProto{} }
+func (m *BoolProto) String() string { return proto.CompactTextString(m) }
+func (*BoolProto) ProtoMessage() {}
+
+func (m *BoolProto) GetValue() bool {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return false
+}
+
+type DoubleProto struct {
+ Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DoubleProto) Reset() { *m = DoubleProto{} }
+func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
+func (*DoubleProto) ProtoMessage() {}
+
+func (m *DoubleProto) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type BytesProto struct {
+ Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BytesProto) Reset() { *m = BytesProto{} }
+func (m *BytesProto) String() string { return proto.CompactTextString(m) }
+func (*BytesProto) ProtoMessage() {}
+
+func (m *BytesProto) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type VoidProto struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *VoidProto) Reset() { *m = VoidProto{} }
+func (m *VoidProto) String() string { return proto.CompactTextString(m) }
+func (*VoidProto) ProtoMessage() {}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto
new file mode 100644
index 000000000..56cd7a3ca
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.proto
@@ -0,0 +1,33 @@
+// Built-in base types for API calls. Primarily useful as return types.
+
+syntax = "proto2";
+option go_package = "base";
+
+package appengine.base;
+
+message StringProto {
+ required string value = 1;
+}
+
+message Integer32Proto {
+ required int32 value = 1;
+}
+
+message Integer64Proto {
+ required int64 value = 1;
+}
+
+message BoolProto {
+ required bool value = 1;
+}
+
+message DoubleProto {
+ required double value = 1;
+}
+
+message BytesProto {
+ required bytes value = 1 [ctype=CORD];
+}
+
+message VoidProto {
+}
diff --git a/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go b/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go
new file mode 100644
index 000000000..f2334eb70
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go
@@ -0,0 +1,152 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/channel/channel_service.proto
+// DO NOT EDIT!
+
+/*
+Package channel is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/channel/channel_service.proto
+
+It has these top-level messages:
+ ChannelServiceError
+ CreateChannelRequest
+ CreateChannelResponse
+ SendMessageRequest
+*/
+package channel
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type ChannelServiceError_ErrorCode int32
+
+const (
+ ChannelServiceError_OK ChannelServiceError_ErrorCode = 0
+ ChannelServiceError_INTERNAL_ERROR ChannelServiceError_ErrorCode = 1
+ ChannelServiceError_INVALID_CHANNEL_KEY ChannelServiceError_ErrorCode = 2
+ ChannelServiceError_BAD_MESSAGE ChannelServiceError_ErrorCode = 3
+ ChannelServiceError_INVALID_CHANNEL_TOKEN_DURATION ChannelServiceError_ErrorCode = 4
+ ChannelServiceError_APPID_ALIAS_REQUIRED ChannelServiceError_ErrorCode = 5
+)
+
+var ChannelServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INTERNAL_ERROR",
+ 2: "INVALID_CHANNEL_KEY",
+ 3: "BAD_MESSAGE",
+ 4: "INVALID_CHANNEL_TOKEN_DURATION",
+ 5: "APPID_ALIAS_REQUIRED",
+}
+var ChannelServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INTERNAL_ERROR": 1,
+ "INVALID_CHANNEL_KEY": 2,
+ "BAD_MESSAGE": 3,
+ "INVALID_CHANNEL_TOKEN_DURATION": 4,
+ "APPID_ALIAS_REQUIRED": 5,
+}
+
+func (x ChannelServiceError_ErrorCode) Enum() *ChannelServiceError_ErrorCode {
+ p := new(ChannelServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x ChannelServiceError_ErrorCode) String() string {
+ return proto.EnumName(ChannelServiceError_ErrorCode_name, int32(x))
+}
+func (x *ChannelServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ChannelServiceError_ErrorCode_value, data, "ChannelServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = ChannelServiceError_ErrorCode(value)
+ return nil
+}
+
+type ChannelServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ChannelServiceError) Reset() { *m = ChannelServiceError{} }
+func (m *ChannelServiceError) String() string { return proto.CompactTextString(m) }
+func (*ChannelServiceError) ProtoMessage() {}
+
+type CreateChannelRequest struct {
+ ApplicationKey *string `protobuf:"bytes,1,req,name=application_key" json:"application_key,omitempty"`
+ DurationMinutes *int32 `protobuf:"varint,2,opt,name=duration_minutes" json:"duration_minutes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateChannelRequest) Reset() { *m = CreateChannelRequest{} }
+func (m *CreateChannelRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateChannelRequest) ProtoMessage() {}
+
+func (m *CreateChannelRequest) GetApplicationKey() string {
+ if m != nil && m.ApplicationKey != nil {
+ return *m.ApplicationKey
+ }
+ return ""
+}
+
+func (m *CreateChannelRequest) GetDurationMinutes() int32 {
+ if m != nil && m.DurationMinutes != nil {
+ return *m.DurationMinutes
+ }
+ return 0
+}
+
+type CreateChannelResponse struct {
+ Token *string `protobuf:"bytes,2,opt,name=token" json:"token,omitempty"`
+ DurationMinutes *int32 `protobuf:"varint,3,opt,name=duration_minutes" json:"duration_minutes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateChannelResponse) Reset() { *m = CreateChannelResponse{} }
+func (m *CreateChannelResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateChannelResponse) ProtoMessage() {}
+
+func (m *CreateChannelResponse) GetToken() string {
+ if m != nil && m.Token != nil {
+ return *m.Token
+ }
+ return ""
+}
+
+func (m *CreateChannelResponse) GetDurationMinutes() int32 {
+ if m != nil && m.DurationMinutes != nil {
+ return *m.DurationMinutes
+ }
+ return 0
+}
+
+type SendMessageRequest struct {
+ ApplicationKey *string `protobuf:"bytes,1,req,name=application_key" json:"application_key,omitempty"`
+ Message *string `protobuf:"bytes,2,req,name=message" json:"message,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SendMessageRequest) Reset() { *m = SendMessageRequest{} }
+func (m *SendMessageRequest) String() string { return proto.CompactTextString(m) }
+func (*SendMessageRequest) ProtoMessage() {}
+
+func (m *SendMessageRequest) GetApplicationKey() string {
+ if m != nil && m.ApplicationKey != nil {
+ return *m.ApplicationKey
+ }
+ return ""
+}
+
+func (m *SendMessageRequest) GetMessage() string {
+ if m != nil && m.Message != nil {
+ return *m.Message
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/channel/channel_service.proto b/vendor/google.golang.org/appengine/internal/channel/channel_service.proto
new file mode 100644
index 000000000..2b5a918ca
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/channel/channel_service.proto
@@ -0,0 +1,30 @@
+syntax = "proto2";
+option go_package = "channel";
+
+package appengine;
+
+message ChannelServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INTERNAL_ERROR = 1;
+ INVALID_CHANNEL_KEY = 2;
+ BAD_MESSAGE = 3;
+ INVALID_CHANNEL_TOKEN_DURATION = 4;
+ APPID_ALIAS_REQUIRED = 5;
+ }
+}
+
+message CreateChannelRequest {
+ required string application_key = 1;
+ optional int32 duration_minutes = 2;
+}
+
+message CreateChannelResponse {
+ optional string token = 2;
+ optional int32 duration_minutes = 3;
+}
+
+message SendMessageRequest {
+ required string application_key = 1;
+ required string message = 2;
+}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
new file mode 100644
index 000000000..cd78da9f6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
@@ -0,0 +1,2776 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto
+// DO NOT EDIT!
+
+/*
+Package datastore is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/datastore/datastore_v3.proto
+
+It has these top-level messages:
+ Action
+ PropertyValue
+ Property
+ Path
+ Reference
+ User
+ EntityProto
+ CompositeProperty
+ Index
+ CompositeIndex
+ IndexPostfix
+ IndexPosition
+ Snapshot
+ InternalHeader
+ Transaction
+ Query
+ CompiledQuery
+ CompiledCursor
+ Cursor
+ Error
+ Cost
+ GetRequest
+ GetResponse
+ PutRequest
+ PutResponse
+ TouchRequest
+ TouchResponse
+ DeleteRequest
+ DeleteResponse
+ NextRequest
+ QueryResult
+ AllocateIdsRequest
+ AllocateIdsResponse
+ CompositeIndices
+ AddActionsRequest
+ AddActionsResponse
+ BeginTransactionRequest
+ CommitResponse
+*/
+package datastore
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type Property_Meaning int32
+
+const (
+ Property_NO_MEANING Property_Meaning = 0
+ Property_BLOB Property_Meaning = 14
+ Property_TEXT Property_Meaning = 15
+ Property_BYTESTRING Property_Meaning = 16
+ Property_ATOM_CATEGORY Property_Meaning = 1
+ Property_ATOM_LINK Property_Meaning = 2
+ Property_ATOM_TITLE Property_Meaning = 3
+ Property_ATOM_CONTENT Property_Meaning = 4
+ Property_ATOM_SUMMARY Property_Meaning = 5
+ Property_ATOM_AUTHOR Property_Meaning = 6
+ Property_GD_WHEN Property_Meaning = 7
+ Property_GD_EMAIL Property_Meaning = 8
+ Property_GEORSS_POINT Property_Meaning = 9
+ Property_GD_IM Property_Meaning = 10
+ Property_GD_PHONENUMBER Property_Meaning = 11
+ Property_GD_POSTALADDRESS Property_Meaning = 12
+ Property_GD_RATING Property_Meaning = 13
+ Property_BLOBKEY Property_Meaning = 17
+ Property_ENTITY_PROTO Property_Meaning = 19
+ Property_INDEX_VALUE Property_Meaning = 18
+)
+
+var Property_Meaning_name = map[int32]string{
+ 0: "NO_MEANING",
+ 14: "BLOB",
+ 15: "TEXT",
+ 16: "BYTESTRING",
+ 1: "ATOM_CATEGORY",
+ 2: "ATOM_LINK",
+ 3: "ATOM_TITLE",
+ 4: "ATOM_CONTENT",
+ 5: "ATOM_SUMMARY",
+ 6: "ATOM_AUTHOR",
+ 7: "GD_WHEN",
+ 8: "GD_EMAIL",
+ 9: "GEORSS_POINT",
+ 10: "GD_IM",
+ 11: "GD_PHONENUMBER",
+ 12: "GD_POSTALADDRESS",
+ 13: "GD_RATING",
+ 17: "BLOBKEY",
+ 19: "ENTITY_PROTO",
+ 18: "INDEX_VALUE",
+}
+var Property_Meaning_value = map[string]int32{
+ "NO_MEANING": 0,
+ "BLOB": 14,
+ "TEXT": 15,
+ "BYTESTRING": 16,
+ "ATOM_CATEGORY": 1,
+ "ATOM_LINK": 2,
+ "ATOM_TITLE": 3,
+ "ATOM_CONTENT": 4,
+ "ATOM_SUMMARY": 5,
+ "ATOM_AUTHOR": 6,
+ "GD_WHEN": 7,
+ "GD_EMAIL": 8,
+ "GEORSS_POINT": 9,
+ "GD_IM": 10,
+ "GD_PHONENUMBER": 11,
+ "GD_POSTALADDRESS": 12,
+ "GD_RATING": 13,
+ "BLOBKEY": 17,
+ "ENTITY_PROTO": 19,
+ "INDEX_VALUE": 18,
+}
+
+func (x Property_Meaning) Enum() *Property_Meaning {
+ p := new(Property_Meaning)
+ *p = x
+ return p
+}
+func (x Property_Meaning) String() string {
+ return proto.EnumName(Property_Meaning_name, int32(x))
+}
+func (x *Property_Meaning) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning")
+ if err != nil {
+ return err
+ }
+ *x = Property_Meaning(value)
+ return nil
+}
+
+type Property_FtsTokenizationOption int32
+
+const (
+ Property_HTML Property_FtsTokenizationOption = 1
+ Property_ATOM Property_FtsTokenizationOption = 2
+)
+
+var Property_FtsTokenizationOption_name = map[int32]string{
+ 1: "HTML",
+ 2: "ATOM",
+}
+var Property_FtsTokenizationOption_value = map[string]int32{
+ "HTML": 1,
+ "ATOM": 2,
+}
+
+func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption {
+ p := new(Property_FtsTokenizationOption)
+ *p = x
+ return p
+}
+func (x Property_FtsTokenizationOption) String() string {
+ return proto.EnumName(Property_FtsTokenizationOption_name, int32(x))
+}
+func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption")
+ if err != nil {
+ return err
+ }
+ *x = Property_FtsTokenizationOption(value)
+ return nil
+}
+
+type EntityProto_Kind int32
+
+const (
+ EntityProto_GD_CONTACT EntityProto_Kind = 1
+ EntityProto_GD_EVENT EntityProto_Kind = 2
+ EntityProto_GD_MESSAGE EntityProto_Kind = 3
+)
+
+var EntityProto_Kind_name = map[int32]string{
+ 1: "GD_CONTACT",
+ 2: "GD_EVENT",
+ 3: "GD_MESSAGE",
+}
+var EntityProto_Kind_value = map[string]int32{
+ "GD_CONTACT": 1,
+ "GD_EVENT": 2,
+ "GD_MESSAGE": 3,
+}
+
+func (x EntityProto_Kind) Enum() *EntityProto_Kind {
+ p := new(EntityProto_Kind)
+ *p = x
+ return p
+}
+func (x EntityProto_Kind) String() string {
+ return proto.EnumName(EntityProto_Kind_name, int32(x))
+}
+func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind")
+ if err != nil {
+ return err
+ }
+ *x = EntityProto_Kind(value)
+ return nil
+}
+
+type Index_Property_Direction int32
+
+const (
+ Index_Property_ASCENDING Index_Property_Direction = 1
+ Index_Property_DESCENDING Index_Property_Direction = 2
+)
+
+var Index_Property_Direction_name = map[int32]string{
+ 1: "ASCENDING",
+ 2: "DESCENDING",
+}
+var Index_Property_Direction_value = map[string]int32{
+ "ASCENDING": 1,
+ "DESCENDING": 2,
+}
+
+func (x Index_Property_Direction) Enum() *Index_Property_Direction {
+ p := new(Index_Property_Direction)
+ *p = x
+ return p
+}
+func (x Index_Property_Direction) String() string {
+ return proto.EnumName(Index_Property_Direction_name, int32(x))
+}
+func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction")
+ if err != nil {
+ return err
+ }
+ *x = Index_Property_Direction(value)
+ return nil
+}
+
+type CompositeIndex_State int32
+
+const (
+ CompositeIndex_WRITE_ONLY CompositeIndex_State = 1
+ CompositeIndex_READ_WRITE CompositeIndex_State = 2
+ CompositeIndex_DELETED CompositeIndex_State = 3
+ CompositeIndex_ERROR CompositeIndex_State = 4
+)
+
+var CompositeIndex_State_name = map[int32]string{
+ 1: "WRITE_ONLY",
+ 2: "READ_WRITE",
+ 3: "DELETED",
+ 4: "ERROR",
+}
+var CompositeIndex_State_value = map[string]int32{
+ "WRITE_ONLY": 1,
+ "READ_WRITE": 2,
+ "DELETED": 3,
+ "ERROR": 4,
+}
+
+func (x CompositeIndex_State) Enum() *CompositeIndex_State {
+ p := new(CompositeIndex_State)
+ *p = x
+ return p
+}
+func (x CompositeIndex_State) String() string {
+ return proto.EnumName(CompositeIndex_State_name, int32(x))
+}
+func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State")
+ if err != nil {
+ return err
+ }
+ *x = CompositeIndex_State(value)
+ return nil
+}
+
+type Snapshot_Status int32
+
+const (
+ Snapshot_INACTIVE Snapshot_Status = 0
+ Snapshot_ACTIVE Snapshot_Status = 1
+)
+
+var Snapshot_Status_name = map[int32]string{
+ 0: "INACTIVE",
+ 1: "ACTIVE",
+}
+var Snapshot_Status_value = map[string]int32{
+ "INACTIVE": 0,
+ "ACTIVE": 1,
+}
+
+func (x Snapshot_Status) Enum() *Snapshot_Status {
+ p := new(Snapshot_Status)
+ *p = x
+ return p
+}
+func (x Snapshot_Status) String() string {
+ return proto.EnumName(Snapshot_Status_name, int32(x))
+}
+func (x *Snapshot_Status) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status")
+ if err != nil {
+ return err
+ }
+ *x = Snapshot_Status(value)
+ return nil
+}
+
+type Query_Hint int32
+
+const (
+ Query_ORDER_FIRST Query_Hint = 1
+ Query_ANCESTOR_FIRST Query_Hint = 2
+ Query_FILTER_FIRST Query_Hint = 3
+)
+
+var Query_Hint_name = map[int32]string{
+ 1: "ORDER_FIRST",
+ 2: "ANCESTOR_FIRST",
+ 3: "FILTER_FIRST",
+}
+var Query_Hint_value = map[string]int32{
+ "ORDER_FIRST": 1,
+ "ANCESTOR_FIRST": 2,
+ "FILTER_FIRST": 3,
+}
+
+func (x Query_Hint) Enum() *Query_Hint {
+ p := new(Query_Hint)
+ *p = x
+ return p
+}
+func (x Query_Hint) String() string {
+ return proto.EnumName(Query_Hint_name, int32(x))
+}
+func (x *Query_Hint) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint")
+ if err != nil {
+ return err
+ }
+ *x = Query_Hint(value)
+ return nil
+}
+
+type Query_Filter_Operator int32
+
+const (
+ Query_Filter_LESS_THAN Query_Filter_Operator = 1
+ Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2
+ Query_Filter_GREATER_THAN Query_Filter_Operator = 3
+ Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4
+ Query_Filter_EQUAL Query_Filter_Operator = 5
+ Query_Filter_IN Query_Filter_Operator = 6
+ Query_Filter_EXISTS Query_Filter_Operator = 7
+)
+
+var Query_Filter_Operator_name = map[int32]string{
+ 1: "LESS_THAN",
+ 2: "LESS_THAN_OR_EQUAL",
+ 3: "GREATER_THAN",
+ 4: "GREATER_THAN_OR_EQUAL",
+ 5: "EQUAL",
+ 6: "IN",
+ 7: "EXISTS",
+}
+var Query_Filter_Operator_value = map[string]int32{
+ "LESS_THAN": 1,
+ "LESS_THAN_OR_EQUAL": 2,
+ "GREATER_THAN": 3,
+ "GREATER_THAN_OR_EQUAL": 4,
+ "EQUAL": 5,
+ "IN": 6,
+ "EXISTS": 7,
+}
+
+func (x Query_Filter_Operator) Enum() *Query_Filter_Operator {
+ p := new(Query_Filter_Operator)
+ *p = x
+ return p
+}
+func (x Query_Filter_Operator) String() string {
+ return proto.EnumName(Query_Filter_Operator_name, int32(x))
+}
+func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator")
+ if err != nil {
+ return err
+ }
+ *x = Query_Filter_Operator(value)
+ return nil
+}
+
+type Query_Order_Direction int32
+
+const (
+ Query_Order_ASCENDING Query_Order_Direction = 1
+ Query_Order_DESCENDING Query_Order_Direction = 2
+)
+
+var Query_Order_Direction_name = map[int32]string{
+ 1: "ASCENDING",
+ 2: "DESCENDING",
+}
+var Query_Order_Direction_value = map[string]int32{
+ "ASCENDING": 1,
+ "DESCENDING": 2,
+}
+
+func (x Query_Order_Direction) Enum() *Query_Order_Direction {
+ p := new(Query_Order_Direction)
+ *p = x
+ return p
+}
+func (x Query_Order_Direction) String() string {
+ return proto.EnumName(Query_Order_Direction_name, int32(x))
+}
+func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction")
+ if err != nil {
+ return err
+ }
+ *x = Query_Order_Direction(value)
+ return nil
+}
+
+type Error_ErrorCode int32
+
+const (
+ Error_BAD_REQUEST Error_ErrorCode = 1
+ Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2
+ Error_INTERNAL_ERROR Error_ErrorCode = 3
+ Error_NEED_INDEX Error_ErrorCode = 4
+ Error_TIMEOUT Error_ErrorCode = 5
+ Error_PERMISSION_DENIED Error_ErrorCode = 6
+ Error_BIGTABLE_ERROR Error_ErrorCode = 7
+ Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8
+ Error_CAPABILITY_DISABLED Error_ErrorCode = 9
+ Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10
+ Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11
+)
+
+var Error_ErrorCode_name = map[int32]string{
+ 1: "BAD_REQUEST",
+ 2: "CONCURRENT_TRANSACTION",
+ 3: "INTERNAL_ERROR",
+ 4: "NEED_INDEX",
+ 5: "TIMEOUT",
+ 6: "PERMISSION_DENIED",
+ 7: "BIGTABLE_ERROR",
+ 8: "COMMITTED_BUT_STILL_APPLYING",
+ 9: "CAPABILITY_DISABLED",
+ 10: "TRY_ALTERNATE_BACKEND",
+ 11: "SAFE_TIME_TOO_OLD",
+}
+var Error_ErrorCode_value = map[string]int32{
+ "BAD_REQUEST": 1,
+ "CONCURRENT_TRANSACTION": 2,
+ "INTERNAL_ERROR": 3,
+ "NEED_INDEX": 4,
+ "TIMEOUT": 5,
+ "PERMISSION_DENIED": 6,
+ "BIGTABLE_ERROR": 7,
+ "COMMITTED_BUT_STILL_APPLYING": 8,
+ "CAPABILITY_DISABLED": 9,
+ "TRY_ALTERNATE_BACKEND": 10,
+ "SAFE_TIME_TOO_OLD": 11,
+}
+
+func (x Error_ErrorCode) Enum() *Error_ErrorCode {
+ p := new(Error_ErrorCode)
+ *p = x
+ return p
+}
+func (x Error_ErrorCode) String() string {
+ return proto.EnumName(Error_ErrorCode_name, int32(x))
+}
+func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = Error_ErrorCode(value)
+ return nil
+}
+
+type PutRequest_AutoIdPolicy int32
+
+const (
+ PutRequest_CURRENT PutRequest_AutoIdPolicy = 0
+ PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1
+)
+
+var PutRequest_AutoIdPolicy_name = map[int32]string{
+ 0: "CURRENT",
+ 1: "SEQUENTIAL",
+}
+var PutRequest_AutoIdPolicy_value = map[string]int32{
+ "CURRENT": 0,
+ "SEQUENTIAL": 1,
+}
+
+func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy {
+ p := new(PutRequest_AutoIdPolicy)
+ *p = x
+ return p
+}
+func (x PutRequest_AutoIdPolicy) String() string {
+ return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x))
+}
+func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy")
+ if err != nil {
+ return err
+ }
+ *x = PutRequest_AutoIdPolicy(value)
+ return nil
+}
+
+type Action struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Action) Reset() { *m = Action{} }
+func (m *Action) String() string { return proto.CompactTextString(m) }
+func (*Action) ProtoMessage() {}
+
+type PropertyValue struct {
+ Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"`
+ BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"`
+ StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"`
+ DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"`
+ Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue" json:"pointvalue,omitempty"`
+ Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue" json:"uservalue,omitempty"`
+ Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue" json:"referencevalue,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue) Reset() { *m = PropertyValue{} }
+func (m *PropertyValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue) ProtoMessage() {}
+
+func (m *PropertyValue) GetInt64Value() int64 {
+ if m != nil && m.Int64Value != nil {
+ return *m.Int64Value
+ }
+ return 0
+}
+
+func (m *PropertyValue) GetBooleanValue() bool {
+ if m != nil && m.BooleanValue != nil {
+ return *m.BooleanValue
+ }
+ return false
+}
+
+func (m *PropertyValue) GetStringValue() string {
+ if m != nil && m.StringValue != nil {
+ return *m.StringValue
+ }
+ return ""
+}
+
+func (m *PropertyValue) GetDoubleValue() float64 {
+ if m != nil && m.DoubleValue != nil {
+ return *m.DoubleValue
+ }
+ return 0
+}
+
+func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue {
+ if m != nil {
+ return m.Pointvalue
+ }
+ return nil
+}
+
+func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue {
+ if m != nil {
+ return m.Uservalue
+ }
+ return nil
+}
+
+func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue {
+ if m != nil {
+ return m.Referencevalue
+ }
+ return nil
+}
+
+type PropertyValue_PointValue struct {
+ X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"`
+ Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} }
+func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_PointValue) ProtoMessage() {}
+
+func (m *PropertyValue_PointValue) GetX() float64 {
+ if m != nil && m.X != nil {
+ return *m.X
+ }
+ return 0
+}
+
+func (m *PropertyValue_PointValue) GetY() float64 {
+ if m != nil && m.Y != nil {
+ return *m.Y
+ }
+ return 0
+}
+
+type PropertyValue_UserValue struct {
+ Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"`
+ AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain" json:"auth_domain,omitempty"`
+ Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity" json:"federated_identity,omitempty"`
+ FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider" json:"federated_provider,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} }
+func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_UserValue) ProtoMessage() {}
+
+func (m *PropertyValue_UserValue) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetFederatedProvider() string {
+ if m != nil && m.FederatedProvider != nil {
+ return *m.FederatedProvider
+ }
+ return ""
+}
+
+type PropertyValue_ReferenceValue struct {
+ App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"`
+ Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement" json:"pathelement,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} }
+func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue) ProtoMessage() {}
+
+func (m *PropertyValue_ReferenceValue) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement {
+ if m != nil {
+ return m.Pathelement
+ }
+ return nil
+}
+
+type PropertyValue_ReferenceValue_PathElement struct {
+ Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"`
+ Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"`
+ Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) Reset() {
+ *m = PropertyValue_ReferenceValue_PathElement{}
+}
+func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+type Property struct {
+ Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"`
+ MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri" json:"meaning_uri,omitempty"`
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"`
+ Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"`
+ Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"`
+ FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"`
+ Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Property) Reset() { *m = Property{} }
+func (m *Property) String() string { return proto.CompactTextString(m) }
+func (*Property) ProtoMessage() {}
+
+const Default_Property_Meaning Property_Meaning = Property_NO_MEANING
+const Default_Property_Searchable bool = false
+const Default_Property_Locale string = "en"
+
+func (m *Property) GetMeaning() Property_Meaning {
+ if m != nil && m.Meaning != nil {
+ return *m.Meaning
+ }
+ return Default_Property_Meaning
+}
+
+func (m *Property) GetMeaningUri() string {
+ if m != nil && m.MeaningUri != nil {
+ return *m.MeaningUri
+ }
+ return ""
+}
+
+func (m *Property) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Property) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *Property) GetMultiple() bool {
+ if m != nil && m.Multiple != nil {
+ return *m.Multiple
+ }
+ return false
+}
+
+func (m *Property) GetSearchable() bool {
+ if m != nil && m.Searchable != nil {
+ return *m.Searchable
+ }
+ return Default_Property_Searchable
+}
+
+func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption {
+ if m != nil && m.FtsTokenizationOption != nil {
+ return *m.FtsTokenizationOption
+ }
+ return Property_HTML
+}
+
+func (m *Property) GetLocale() string {
+ if m != nil && m.Locale != nil {
+ return *m.Locale
+ }
+ return Default_Property_Locale
+}
+
+type Path struct {
+ Element []*Path_Element `protobuf:"group,1,rep" json:"element,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Path) Reset() { *m = Path{} }
+func (m *Path) String() string { return proto.CompactTextString(m) }
+func (*Path) ProtoMessage() {}
+
+func (m *Path) GetElement() []*Path_Element {
+ if m != nil {
+ return m.Element
+ }
+ return nil
+}
+
+type Path_Element struct {
+ Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"`
+ Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"`
+ Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Path_Element) Reset() { *m = Path_Element{} }
+func (m *Path_Element) String() string { return proto.CompactTextString(m) }
+func (*Path_Element) ProtoMessage() {}
+
+func (m *Path_Element) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *Path_Element) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *Path_Element) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+type Reference struct {
+ App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"`
+ Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Reference) Reset() { *m = Reference{} }
+func (m *Reference) String() string { return proto.CompactTextString(m) }
+func (*Reference) ProtoMessage() {}
+
+func (m *Reference) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Reference) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *Reference) GetPath() *Path {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+type User struct {
+ Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
+ AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain" json:"auth_domain,omitempty"`
+ Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity" json:"federated_identity,omitempty"`
+ FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider" json:"federated_provider,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *User) Reset() { *m = User{} }
+func (m *User) String() string { return proto.CompactTextString(m) }
+func (*User) ProtoMessage() {}
+
+func (m *User) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *User) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *User) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *User) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+func (m *User) GetFederatedProvider() string {
+ if m != nil && m.FederatedProvider != nil {
+ return *m.FederatedProvider
+ }
+ return ""
+}
+
+type EntityProto struct {
+ Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"`
+ EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group" json:"entity_group,omitempty"`
+ Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"`
+ Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"`
+ KindUri *string `protobuf:"bytes,5,opt,name=kind_uri" json:"kind_uri,omitempty"`
+ Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+ RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property" json:"raw_property,omitempty"`
+ Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EntityProto) Reset() { *m = EntityProto{} }
+func (m *EntityProto) String() string { return proto.CompactTextString(m) }
+func (*EntityProto) ProtoMessage() {}
+
+func (m *EntityProto) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *EntityProto) GetEntityGroup() *Path {
+ if m != nil {
+ return m.EntityGroup
+ }
+ return nil
+}
+
+func (m *EntityProto) GetOwner() *User {
+ if m != nil {
+ return m.Owner
+ }
+ return nil
+}
+
+func (m *EntityProto) GetKind() EntityProto_Kind {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return EntityProto_GD_CONTACT
+}
+
+func (m *EntityProto) GetKindUri() string {
+ if m != nil && m.KindUri != nil {
+ return *m.KindUri
+ }
+ return ""
+}
+
+func (m *EntityProto) GetProperty() []*Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+func (m *EntityProto) GetRawProperty() []*Property {
+ if m != nil {
+ return m.RawProperty
+ }
+ return nil
+}
+
+func (m *EntityProto) GetRank() int32 {
+ if m != nil && m.Rank != nil {
+ return *m.Rank
+ }
+ return 0
+}
+
+type CompositeProperty struct {
+ IndexId *int64 `protobuf:"varint,1,req,name=index_id" json:"index_id,omitempty"`
+ Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeProperty) Reset() { *m = CompositeProperty{} }
+func (m *CompositeProperty) String() string { return proto.CompactTextString(m) }
+func (*CompositeProperty) ProtoMessage() {}
+
+func (m *CompositeProperty) GetIndexId() int64 {
+ if m != nil && m.IndexId != nil {
+ return *m.IndexId
+ }
+ return 0
+}
+
+func (m *CompositeProperty) GetValue() []string {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Index struct {
+ EntityType *string `protobuf:"bytes,1,req,name=entity_type" json:"entity_type,omitempty"`
+ Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"`
+ Property []*Index_Property `protobuf:"group,2,rep" json:"property,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Index) Reset() { *m = Index{} }
+func (m *Index) String() string { return proto.CompactTextString(m) }
+func (*Index) ProtoMessage() {}
+
+func (m *Index) GetEntityType() string {
+ if m != nil && m.EntityType != nil {
+ return *m.EntityType
+ }
+ return ""
+}
+
+func (m *Index) GetAncestor() bool {
+ if m != nil && m.Ancestor != nil {
+ return *m.Ancestor
+ }
+ return false
+}
+
+func (m *Index) GetProperty() []*Index_Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+type Index_Property struct {
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Index_Property) Reset() { *m = Index_Property{} }
+func (m *Index_Property) String() string { return proto.CompactTextString(m) }
+func (*Index_Property) ProtoMessage() {}
+
+const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING
+
+func (m *Index_Property) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Index_Property) GetDirection() Index_Property_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_Index_Property_Direction
+}
+
+type CompositeIndex struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
+ Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"`
+ State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"`
+ OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,def=0" json:"only_use_if_required,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeIndex) Reset() { *m = CompositeIndex{} }
+func (m *CompositeIndex) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndex) ProtoMessage() {}
+
+const Default_CompositeIndex_OnlyUseIfRequired bool = false
+
+func (m *CompositeIndex) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *CompositeIndex) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *CompositeIndex) GetDefinition() *Index {
+ if m != nil {
+ return m.Definition
+ }
+ return nil
+}
+
+func (m *CompositeIndex) GetState() CompositeIndex_State {
+ if m != nil && m.State != nil {
+ return *m.State
+ }
+ return CompositeIndex_WRITE_ONLY
+}
+
+func (m *CompositeIndex) GetOnlyUseIfRequired() bool {
+ if m != nil && m.OnlyUseIfRequired != nil {
+ return *m.OnlyUseIfRequired
+ }
+ return Default_CompositeIndex_OnlyUseIfRequired
+}
+
+type IndexPostfix struct {
+ IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value" json:"index_value,omitempty"`
+ Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
+ Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexPostfix) Reset() { *m = IndexPostfix{} }
+func (m *IndexPostfix) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix) ProtoMessage() {}
+
+const Default_IndexPostfix_Before bool = true
+
+func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue {
+ if m != nil {
+ return m.IndexValue
+ }
+ return nil
+}
+
+func (m *IndexPostfix) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *IndexPostfix) GetBefore() bool {
+ if m != nil && m.Before != nil {
+ return *m.Before
+ }
+ return Default_IndexPostfix_Before
+}
+
+type IndexPostfix_IndexValue struct {
+ PropertyName *string `protobuf:"bytes,1,req,name=property_name" json:"property_name,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} }
+func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix_IndexValue) ProtoMessage() {}
+
+func (m *IndexPostfix_IndexValue) GetPropertyName() string {
+ if m != nil && m.PropertyName != nil {
+ return *m.PropertyName
+ }
+ return ""
+}
+
+func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type IndexPosition struct {
+ Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
+ Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexPosition) Reset() { *m = IndexPosition{} }
+func (m *IndexPosition) String() string { return proto.CompactTextString(m) }
+func (*IndexPosition) ProtoMessage() {}
+
+const Default_IndexPosition_Before bool = true
+
+func (m *IndexPosition) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *IndexPosition) GetBefore() bool {
+ if m != nil && m.Before != nil {
+ return *m.Before
+ }
+ return Default_IndexPosition_Before
+}
+
+type Snapshot struct {
+ Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+
+func (m *Snapshot) GetTs() int64 {
+ if m != nil && m.Ts != nil {
+ return *m.Ts
+ }
+ return 0
+}
+
+type InternalHeader struct {
+ Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *InternalHeader) Reset() { *m = InternalHeader{} }
+func (m *InternalHeader) String() string { return proto.CompactTextString(m) }
+func (*InternalHeader) ProtoMessage() {}
+
+func (m *InternalHeader) GetQos() string {
+ if m != nil && m.Qos != nil {
+ return *m.Qos
+ }
+ return ""
+}
+
+type Transaction struct {
+ Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+ Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"`
+ App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"`
+ MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Transaction) Reset() { *m = Transaction{} }
+func (m *Transaction) String() string { return proto.CompactTextString(m) }
+func (*Transaction) ProtoMessage() {}
+
+const Default_Transaction_MarkChanges bool = false
+
+func (m *Transaction) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *Transaction) GetHandle() uint64 {
+ if m != nil && m.Handle != nil {
+ return *m.Handle
+ }
+ return 0
+}
+
+func (m *Transaction) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Transaction) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_Transaction_MarkChanges
+}
+
+type Query struct {
+ Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"`
+ App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,29,opt,name=name_space" json:"name_space,omitempty"`
+ Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"`
+ Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"`
+ Filter []*Query_Filter `protobuf:"group,4,rep" json:"filter,omitempty"`
+ SearchQuery *string `protobuf:"bytes,8,opt,name=search_query" json:"search_query,omitempty"`
+ Order []*Query_Order `protobuf:"group,9,rep" json:"order,omitempty"`
+ Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"`
+ Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"`
+ Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"`
+ Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"`
+ CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"`
+ EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor" json:"end_compiled_cursor,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index" json:"composite_index,omitempty"`
+ RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,def=0" json:"require_perfect_plan,omitempty"`
+ KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,def=0" json:"keys_only,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"`
+ Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"`
+ FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms" json:"failover_ms,omitempty"`
+ Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"`
+ PropertyName []string `protobuf:"bytes,33,rep,name=property_name" json:"property_name,omitempty"`
+ GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name" json:"group_by_property_name,omitempty"`
+ Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"`
+ MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds" json:"min_safe_time_seconds,omitempty"`
+ SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name" json:"safe_replica_name,omitempty"`
+ PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,def=0" json:"persist_offset,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Query) Reset() { *m = Query{} }
+func (m *Query) String() string { return proto.CompactTextString(m) }
+func (*Query) ProtoMessage() {}
+
+const Default_Query_Offset int32 = 0
+const Default_Query_RequirePerfectPlan bool = false
+const Default_Query_KeysOnly bool = false
+const Default_Query_Compile bool = false
+const Default_Query_PersistOffset bool = false
+
+func (m *Query) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *Query) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Query) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *Query) GetKind() string {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return ""
+}
+
+func (m *Query) GetAncestor() *Reference {
+ if m != nil {
+ return m.Ancestor
+ }
+ return nil
+}
+
+func (m *Query) GetFilter() []*Query_Filter {
+ if m != nil {
+ return m.Filter
+ }
+ return nil
+}
+
+func (m *Query) GetSearchQuery() string {
+ if m != nil && m.SearchQuery != nil {
+ return *m.SearchQuery
+ }
+ return ""
+}
+
+func (m *Query) GetOrder() []*Query_Order {
+ if m != nil {
+ return m.Order
+ }
+ return nil
+}
+
+func (m *Query) GetHint() Query_Hint {
+ if m != nil && m.Hint != nil {
+ return *m.Hint
+ }
+ return Query_ORDER_FIRST
+}
+
+func (m *Query) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *Query) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_Query_Offset
+}
+
+func (m *Query) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+func (m *Query) GetCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.CompiledCursor
+ }
+ return nil
+}
+
+func (m *Query) GetEndCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.EndCompiledCursor
+ }
+ return nil
+}
+
+func (m *Query) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *Query) GetRequirePerfectPlan() bool {
+ if m != nil && m.RequirePerfectPlan != nil {
+ return *m.RequirePerfectPlan
+ }
+ return Default_Query_RequirePerfectPlan
+}
+
+func (m *Query) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return Default_Query_KeysOnly
+}
+
+func (m *Query) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *Query) GetCompile() bool {
+ if m != nil && m.Compile != nil {
+ return *m.Compile
+ }
+ return Default_Query_Compile
+}
+
+func (m *Query) GetFailoverMs() int64 {
+ if m != nil && m.FailoverMs != nil {
+ return *m.FailoverMs
+ }
+ return 0
+}
+
+func (m *Query) GetStrong() bool {
+ if m != nil && m.Strong != nil {
+ return *m.Strong
+ }
+ return false
+}
+
+func (m *Query) GetPropertyName() []string {
+ if m != nil {
+ return m.PropertyName
+ }
+ return nil
+}
+
+func (m *Query) GetGroupByPropertyName() []string {
+ if m != nil {
+ return m.GroupByPropertyName
+ }
+ return nil
+}
+
+func (m *Query) GetDistinct() bool {
+ if m != nil && m.Distinct != nil {
+ return *m.Distinct
+ }
+ return false
+}
+
+func (m *Query) GetMinSafeTimeSeconds() int64 {
+ if m != nil && m.MinSafeTimeSeconds != nil {
+ return *m.MinSafeTimeSeconds
+ }
+ return 0
+}
+
+func (m *Query) GetSafeReplicaName() []string {
+ if m != nil {
+ return m.SafeReplicaName
+ }
+ return nil
+}
+
+func (m *Query) GetPersistOffset() bool {
+ if m != nil && m.PersistOffset != nil {
+ return *m.PersistOffset
+ }
+ return Default_Query_PersistOffset
+}
+
+type Query_Filter struct {
+ Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"`
+ Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Query_Filter) Reset() { *m = Query_Filter{} }
+func (m *Query_Filter) String() string { return proto.CompactTextString(m) }
+func (*Query_Filter) ProtoMessage() {}
+
+func (m *Query_Filter) GetOp() Query_Filter_Operator {
+ if m != nil && m.Op != nil {
+ return *m.Op
+ }
+ return Query_Filter_LESS_THAN
+}
+
+func (m *Query_Filter) GetProperty() []*Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+type Query_Order struct {
+ Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"`
+ Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Query_Order) Reset() { *m = Query_Order{} }
+func (m *Query_Order) String() string { return proto.CompactTextString(m) }
+func (*Query_Order) ProtoMessage() {}
+
+const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING
+
+func (m *Query_Order) GetProperty() string {
+ if m != nil && m.Property != nil {
+ return *m.Property
+ }
+ return ""
+}
+
+func (m *Query_Order) GetDirection() Query_Order_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_Query_Order_Direction
+}
+
+type CompiledQuery struct {
+ Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan" json:"primaryscan,omitempty"`
+ Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan" json:"mergejoinscan,omitempty"`
+ IndexDef *Index `protobuf:"bytes,21,opt,name=index_def" json:"index_def,omitempty"`
+ Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
+ Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
+ KeysOnly *bool `protobuf:"varint,12,req,name=keys_only" json:"keys_only,omitempty"`
+ PropertyName []string `protobuf:"bytes,24,rep,name=property_name" json:"property_name,omitempty"`
+ DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size" json:"distinct_infix_size,omitempty"`
+ Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter" json:"entityfilter,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery) Reset() { *m = CompiledQuery{} }
+func (m *CompiledQuery) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery) ProtoMessage() {}
+
+const Default_CompiledQuery_Offset int32 = 0
+
+func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan {
+ if m != nil {
+ return m.Primaryscan
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan {
+ if m != nil {
+ return m.Mergejoinscan
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetIndexDef() *Index {
+ if m != nil {
+ return m.IndexDef
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_CompiledQuery_Offset
+}
+
+func (m *CompiledQuery) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+func (m *CompiledQuery) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *CompiledQuery) GetPropertyName() []string {
+ if m != nil {
+ return m.PropertyName
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetDistinctInfixSize() int32 {
+ if m != nil && m.DistinctInfixSize != nil {
+ return *m.DistinctInfixSize
+ }
+ return 0
+}
+
+func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter {
+ if m != nil {
+ return m.Entityfilter
+ }
+ return nil
+}
+
+type CompiledQuery_PrimaryScan struct {
+ IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"`
+ StartKey *string `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"`
+ StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive" json:"start_inclusive,omitempty"`
+ EndKey *string `protobuf:"bytes,5,opt,name=end_key" json:"end_key,omitempty"`
+ EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive" json:"end_inclusive,omitempty"`
+ StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value" json:"start_postfix_value,omitempty"`
+ EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value" json:"end_postfix_value,omitempty"`
+ EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us" json:"end_unapplied_log_timestamp_us,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} }
+func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_PrimaryScan) ProtoMessage() {}
+
+func (m *CompiledQuery_PrimaryScan) GetIndexName() string {
+ if m != nil && m.IndexName != nil {
+ return *m.IndexName
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartKey() string {
+ if m != nil && m.StartKey != nil {
+ return *m.StartKey
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool {
+ if m != nil && m.StartInclusive != nil {
+ return *m.StartInclusive
+ }
+ return false
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndKey() string {
+ if m != nil && m.EndKey != nil {
+ return *m.EndKey
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool {
+ if m != nil && m.EndInclusive != nil {
+ return *m.EndInclusive
+ }
+ return false
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string {
+ if m != nil {
+ return m.StartPostfixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string {
+ if m != nil {
+ return m.EndPostfixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 {
+ if m != nil && m.EndUnappliedLogTimestampUs != nil {
+ return *m.EndUnappliedLogTimestampUs
+ }
+ return 0
+}
+
+type CompiledQuery_MergeJoinScan struct {
+ IndexName *string `protobuf:"bytes,8,req,name=index_name" json:"index_name,omitempty"`
+ PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value" json:"prefix_value,omitempty"`
+ ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,def=0" json:"value_prefix,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} }
+func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_MergeJoinScan) ProtoMessage() {}
+
+const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false
+
+func (m *CompiledQuery_MergeJoinScan) GetIndexName() string {
+ if m != nil && m.IndexName != nil {
+ return *m.IndexName
+ }
+ return ""
+}
+
+func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string {
+ if m != nil {
+ return m.PrefixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool {
+ if m != nil && m.ValuePrefix != nil {
+ return *m.ValuePrefix
+ }
+ return Default_CompiledQuery_MergeJoinScan_ValuePrefix
+}
+
+type CompiledQuery_EntityFilter struct {
+ Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"`
+ Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"`
+ Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} }
+func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_EntityFilter) ProtoMessage() {}
+
+const Default_CompiledQuery_EntityFilter_Distinct bool = false
+
+func (m *CompiledQuery_EntityFilter) GetDistinct() bool {
+ if m != nil && m.Distinct != nil {
+ return *m.Distinct
+ }
+ return Default_CompiledQuery_EntityFilter_Distinct
+}
+
+func (m *CompiledQuery_EntityFilter) GetKind() string {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return ""
+}
+
+func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference {
+ if m != nil {
+ return m.Ancestor
+ }
+ return nil
+}
+
+type CompiledCursor struct {
+ Position *CompiledCursor_Position `protobuf:"group,2,opt" json:"position,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledCursor) Reset() { *m = CompiledCursor{} }
+func (m *CompiledCursor) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor) ProtoMessage() {}
+
+func (m *CompiledCursor) GetPosition() *CompiledCursor_Position {
+ if m != nil {
+ return m.Position
+ }
+ return nil
+}
+
+type CompiledCursor_Position struct {
+ StartKey *string `protobuf:"bytes,27,opt,name=start_key" json:"start_key,omitempty"`
+ Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue" json:"indexvalue,omitempty"`
+ Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"`
+ StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,def=1" json:"start_inclusive,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} }
+func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position) ProtoMessage() {}
+
+const Default_CompiledCursor_Position_StartInclusive bool = true
+
+func (m *CompiledCursor_Position) GetStartKey() string {
+ if m != nil && m.StartKey != nil {
+ return *m.StartKey
+ }
+ return ""
+}
+
+func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue {
+ if m != nil {
+ return m.Indexvalue
+ }
+ return nil
+}
+
+func (m *CompiledCursor_Position) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *CompiledCursor_Position) GetStartInclusive() bool {
+ if m != nil && m.StartInclusive != nil {
+ return *m.StartInclusive
+ }
+ return Default_CompiledCursor_Position_StartInclusive
+}
+
+type CompiledCursor_Position_IndexValue struct {
+ Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} }
+func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position_IndexValue) ProtoMessage() {}
+
+func (m *CompiledCursor_Position_IndexValue) GetProperty() string {
+ if m != nil && m.Property != nil {
+ return *m.Property
+ }
+ return ""
+}
+
+func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Cursor struct {
+ Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"`
+ App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Cursor) Reset() { *m = Cursor{} }
+func (m *Cursor) String() string { return proto.CompactTextString(m) }
+func (*Cursor) ProtoMessage() {}
+
+func (m *Cursor) GetCursor() uint64 {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return 0
+}
+
+func (m *Cursor) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+type Error struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Error) Reset() { *m = Error{} }
+func (m *Error) String() string { return proto.CompactTextString(m) }
+func (*Error) ProtoMessage() {}
+
+type Cost struct {
+ IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes" json:"index_writes,omitempty"`
+ IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes" json:"index_write_bytes,omitempty"`
+ EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes" json:"entity_writes,omitempty"`
+ EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes" json:"entity_write_bytes,omitempty"`
+ Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost" json:"commitcost,omitempty"`
+ ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta" json:"approximate_storage_delta,omitempty"`
+ IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates" json:"id_sequence_updates,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Cost) Reset() { *m = Cost{} }
+func (m *Cost) String() string { return proto.CompactTextString(m) }
+func (*Cost) ProtoMessage() {}
+
+func (m *Cost) GetIndexWrites() int32 {
+ if m != nil && m.IndexWrites != nil {
+ return *m.IndexWrites
+ }
+ return 0
+}
+
+func (m *Cost) GetIndexWriteBytes() int32 {
+ if m != nil && m.IndexWriteBytes != nil {
+ return *m.IndexWriteBytes
+ }
+ return 0
+}
+
+func (m *Cost) GetEntityWrites() int32 {
+ if m != nil && m.EntityWrites != nil {
+ return *m.EntityWrites
+ }
+ return 0
+}
+
+func (m *Cost) GetEntityWriteBytes() int32 {
+ if m != nil && m.EntityWriteBytes != nil {
+ return *m.EntityWriteBytes
+ }
+ return 0
+}
+
+func (m *Cost) GetCommitcost() *Cost_CommitCost {
+ if m != nil {
+ return m.Commitcost
+ }
+ return nil
+}
+
+func (m *Cost) GetApproximateStorageDelta() int32 {
+ if m != nil && m.ApproximateStorageDelta != nil {
+ return *m.ApproximateStorageDelta
+ }
+ return 0
+}
+
+func (m *Cost) GetIdSequenceUpdates() int32 {
+ if m != nil && m.IdSequenceUpdates != nil {
+ return *m.IdSequenceUpdates
+ }
+ return 0
+}
+
+type Cost_CommitCost struct {
+ RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts" json:"requested_entity_puts,omitempty"`
+ RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes" json:"requested_entity_deletes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} }
+func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) }
+func (*Cost_CommitCost) ProtoMessage() {}
+
+func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 {
+ if m != nil && m.RequestedEntityPuts != nil {
+ return *m.RequestedEntityPuts
+ }
+ return 0
+}
+
+func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 {
+ if m != nil && m.RequestedEntityDeletes != nil {
+ return *m.RequestedEntityDeletes
+ }
+ return 0
+}
+
+type GetRequest struct {
+ Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+ FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms" json:"failover_ms,omitempty"`
+ Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"`
+ AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,def=0" json:"allow_deferred,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetRequest) Reset() { *m = GetRequest{} }
+func (m *GetRequest) String() string { return proto.CompactTextString(m) }
+func (*GetRequest) ProtoMessage() {}
+
+const Default_GetRequest_AllowDeferred bool = false
+
+func (m *GetRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *GetRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *GetRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *GetRequest) GetFailoverMs() int64 {
+ if m != nil && m.FailoverMs != nil {
+ return *m.FailoverMs
+ }
+ return 0
+}
+
+func (m *GetRequest) GetStrong() bool {
+ if m != nil && m.Strong != nil {
+ return *m.Strong
+ }
+ return false
+}
+
+func (m *GetRequest) GetAllowDeferred() bool {
+ if m != nil && m.AllowDeferred != nil {
+ return *m.AllowDeferred
+ }
+ return Default_GetRequest_AllowDeferred
+}
+
+type GetResponse struct {
+ Entity []*GetResponse_Entity `protobuf:"group,1,rep" json:"entity,omitempty"`
+ Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"`
+ InOrder *bool `protobuf:"varint,6,opt,name=in_order,def=1" json:"in_order,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetResponse) Reset() { *m = GetResponse{} }
+func (m *GetResponse) String() string { return proto.CompactTextString(m) }
+func (*GetResponse) ProtoMessage() {}
+
+const Default_GetResponse_InOrder bool = true
+
+func (m *GetResponse) GetEntity() []*GetResponse_Entity {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *GetResponse) GetDeferred() []*Reference {
+ if m != nil {
+ return m.Deferred
+ }
+ return nil
+}
+
+func (m *GetResponse) GetInOrder() bool {
+ if m != nil && m.InOrder != nil {
+ return *m.InOrder
+ }
+ return Default_GetResponse_InOrder
+}
+
+type GetResponse_Entity struct {
+ Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"`
+ Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"`
+ Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} }
+func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) }
+func (*GetResponse_Entity) ProtoMessage() {}
+
+func (m *GetResponse_Entity) GetEntity() *EntityProto {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *GetResponse_Entity) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *GetResponse_Entity) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+type PutRequest struct {
+ Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"`
+ Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index" json:"composite_index,omitempty"`
+ Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+ Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+ MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PutRequest) Reset() { *m = PutRequest{} }
+func (m *PutRequest) String() string { return proto.CompactTextString(m) }
+func (*PutRequest) ProtoMessage() {}
+
+const Default_PutRequest_Trusted bool = false
+const Default_PutRequest_Force bool = false
+const Default_PutRequest_MarkChanges bool = false
+const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT
+
+func (m *PutRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *PutRequest) GetEntity() []*EntityProto {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *PutRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *PutRequest) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *PutRequest) GetTrusted() bool {
+ if m != nil && m.Trusted != nil {
+ return *m.Trusted
+ }
+ return Default_PutRequest_Trusted
+}
+
+func (m *PutRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_PutRequest_Force
+}
+
+func (m *PutRequest) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_PutRequest_MarkChanges
+}
+
+func (m *PutRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy {
+ if m != nil && m.AutoIdPolicy != nil {
+ return *m.AutoIdPolicy
+ }
+ return Default_PutRequest_AutoIdPolicy
+}
+
+type PutResponse struct {
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"`
+ Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PutResponse) Reset() { *m = PutResponse{} }
+func (m *PutResponse) String() string { return proto.CompactTextString(m) }
+func (*PutResponse) ProtoMessage() {}
+
+func (m *PutResponse) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *PutResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *PutResponse) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type TouchRequest struct {
+ Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index" json:"composite_index,omitempty"`
+ Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TouchRequest) Reset() { *m = TouchRequest{} }
+func (m *TouchRequest) String() string { return proto.CompactTextString(m) }
+func (*TouchRequest) ProtoMessage() {}
+
+const Default_TouchRequest_Force bool = false
+
+func (m *TouchRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_TouchRequest_Force
+}
+
+func (m *TouchRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+type TouchResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TouchResponse) Reset() { *m = TouchResponse{} }
+func (m *TouchResponse) String() string { return proto.CompactTextString(m) }
+func (*TouchResponse) ProtoMessage() {}
+
+func (m *TouchResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+type DeleteRequest struct {
+ Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"`
+ Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+ Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+ MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
+func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteRequest) ProtoMessage() {}
+
+const Default_DeleteRequest_Trusted bool = false
+const Default_DeleteRequest_Force bool = false
+const Default_DeleteRequest_MarkChanges bool = false
+
+func (m *DeleteRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetTrusted() bool {
+ if m != nil && m.Trusted != nil {
+ return *m.Trusted
+ }
+ return Default_DeleteRequest_Trusted
+}
+
+func (m *DeleteRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_DeleteRequest_Force
+}
+
+func (m *DeleteRequest) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_DeleteRequest_MarkChanges
+}
+
+func (m *DeleteRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+type DeleteResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteResponse) Reset() { *m = DeleteResponse{} }
+func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteResponse) ProtoMessage() {}
+
+func (m *DeleteResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *DeleteResponse) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type NextRequest struct {
+ Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"`
+ Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"`
+ Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
+ Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"`
+ Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *NextRequest) Reset() { *m = NextRequest{} }
+func (m *NextRequest) String() string { return proto.CompactTextString(m) }
+func (*NextRequest) ProtoMessage() {}
+
+const Default_NextRequest_Offset int32 = 0
+const Default_NextRequest_Compile bool = false
+
+func (m *NextRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *NextRequest) GetCursor() *Cursor {
+ if m != nil {
+ return m.Cursor
+ }
+ return nil
+}
+
+func (m *NextRequest) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *NextRequest) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_NextRequest_Offset
+}
+
+func (m *NextRequest) GetCompile() bool {
+ if m != nil && m.Compile != nil {
+ return *m.Compile
+ }
+ return Default_NextRequest_Compile
+}
+
+type QueryResult struct {
+ Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"`
+ Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"`
+ SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results" json:"skipped_results,omitempty"`
+ MoreResults *bool `protobuf:"varint,3,req,name=more_results" json:"more_results,omitempty"`
+ KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only" json:"keys_only,omitempty"`
+ IndexOnly *bool `protobuf:"varint,9,opt,name=index_only" json:"index_only,omitempty"`
+ SmallOps *bool `protobuf:"varint,10,opt,name=small_ops" json:"small_ops,omitempty"`
+ CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query" json:"compiled_query,omitempty"`
+ CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"`
+ Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"`
+ Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *QueryResult) Reset() { *m = QueryResult{} }
+func (m *QueryResult) String() string { return proto.CompactTextString(m) }
+func (*QueryResult) ProtoMessage() {}
+
+func (m *QueryResult) GetCursor() *Cursor {
+ if m != nil {
+ return m.Cursor
+ }
+ return nil
+}
+
+func (m *QueryResult) GetResult() []*EntityProto {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+func (m *QueryResult) GetSkippedResults() int32 {
+ if m != nil && m.SkippedResults != nil {
+ return *m.SkippedResults
+ }
+ return 0
+}
+
+func (m *QueryResult) GetMoreResults() bool {
+ if m != nil && m.MoreResults != nil {
+ return *m.MoreResults
+ }
+ return false
+}
+
+func (m *QueryResult) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *QueryResult) GetIndexOnly() bool {
+ if m != nil && m.IndexOnly != nil {
+ return *m.IndexOnly
+ }
+ return false
+}
+
+func (m *QueryResult) GetSmallOps() bool {
+ if m != nil && m.SmallOps != nil {
+ return *m.SmallOps
+ }
+ return false
+}
+
+func (m *QueryResult) GetCompiledQuery() *CompiledQuery {
+ if m != nil {
+ return m.CompiledQuery
+ }
+ return nil
+}
+
+func (m *QueryResult) GetCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.CompiledCursor
+ }
+ return nil
+}
+
+func (m *QueryResult) GetIndex() []*CompositeIndex {
+ if m != nil {
+ return m.Index
+ }
+ return nil
+}
+
+func (m *QueryResult) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type AllocateIdsRequest struct {
+ Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+ ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key" json:"model_key,omitempty"`
+ Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
+ Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"`
+ Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} }
+func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsRequest) ProtoMessage() {}
+
+func (m *AllocateIdsRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AllocateIdsRequest) GetModelKey() *Reference {
+ if m != nil {
+ return m.ModelKey
+ }
+ return nil
+}
+
+func (m *AllocateIdsRequest) GetSize() int64 {
+ if m != nil && m.Size != nil {
+ return *m.Size
+ }
+ return 0
+}
+
+func (m *AllocateIdsRequest) GetMax() int64 {
+ if m != nil && m.Max != nil {
+ return *m.Max
+ }
+ return 0
+}
+
+func (m *AllocateIdsRequest) GetReserve() []*Reference {
+ if m != nil {
+ return m.Reserve
+ }
+ return nil
+}
+
+type AllocateIdsResponse struct {
+ Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"`
+ End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"`
+ Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} }
+func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsResponse) ProtoMessage() {}
+
+func (m *AllocateIdsResponse) GetStart() int64 {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return 0
+}
+
+func (m *AllocateIdsResponse) GetEnd() int64 {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return 0
+}
+
+func (m *AllocateIdsResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+type CompositeIndices struct {
+ Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeIndices) Reset() { *m = CompositeIndices{} }
+func (m *CompositeIndices) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndices) ProtoMessage() {}
+
+func (m *CompositeIndices) GetIndex() []*CompositeIndex {
+ if m != nil {
+ return m.Index
+ }
+ return nil
+}
+
+type AddActionsRequest struct {
+ Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
+ Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} }
+func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) }
+func (*AddActionsRequest) ProtoMessage() {}
+
+func (m *AddActionsRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AddActionsRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *AddActionsRequest) GetAction() []*Action {
+ if m != nil {
+ return m.Action
+ }
+ return nil
+}
+
+type AddActionsResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} }
+func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) }
+func (*AddActionsResponse) ProtoMessage() {}
+
+type BeginTransactionRequest struct {
+ Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+ App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+ AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,def=0" json:"allow_multiple_eg,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} }
+func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
+func (*BeginTransactionRequest) ProtoMessage() {}
+
+const Default_BeginTransactionRequest_AllowMultipleEg bool = false
+
+func (m *BeginTransactionRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *BeginTransactionRequest) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *BeginTransactionRequest) GetAllowMultipleEg() bool {
+ if m != nil && m.AllowMultipleEg != nil {
+ return *m.AllowMultipleEg
+ }
+ return Default_BeginTransactionRequest_AllowMultipleEg
+}
+
+type CommitResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ Version []*CommitResponse_Version `protobuf:"group,3,rep" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CommitResponse) Reset() { *m = CommitResponse{} }
+func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse) ProtoMessage() {}
+
+func (m *CommitResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *CommitResponse) GetVersion() []*CommitResponse_Version {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type CommitResponse_Version struct {
+ RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key" json:"root_entity_key,omitempty"`
+ Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} }
+func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse_Version) ProtoMessage() {}
+
+func (m *CommitResponse_Version) GetRootEntityKey() *Reference {
+ if m != nil {
+ return m.RootEntityKey
+ }
+ return nil
+}
+
+func (m *CommitResponse_Version) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
new file mode 100644
index 000000000..e76f126ff
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
@@ -0,0 +1,541 @@
+syntax = "proto2";
+option go_package = "datastore";
+
+package appengine;
+
+message Action{}
+
+message PropertyValue {
+ optional int64 int64Value = 1;
+ optional bool booleanValue = 2;
+ optional string stringValue = 3;
+ optional double doubleValue = 4;
+
+ optional group PointValue = 5 {
+ required double x = 6;
+ required double y = 7;
+ }
+
+ optional group UserValue = 8 {
+ required string email = 9;
+ required string auth_domain = 10;
+ optional string nickname = 11;
+ optional string federated_identity = 21;
+ optional string federated_provider = 22;
+ }
+
+ optional group ReferenceValue = 12 {
+ required string app = 13;
+ optional string name_space = 20;
+ repeated group PathElement = 14 {
+ required string type = 15;
+ optional int64 id = 16;
+ optional string name = 17;
+ }
+ }
+}
+
+message Property {
+ enum Meaning {
+ NO_MEANING = 0;
+ BLOB = 14;
+ TEXT = 15;
+ BYTESTRING = 16;
+
+ ATOM_CATEGORY = 1;
+ ATOM_LINK = 2;
+ ATOM_TITLE = 3;
+ ATOM_CONTENT = 4;
+ ATOM_SUMMARY = 5;
+ ATOM_AUTHOR = 6;
+
+ GD_WHEN = 7;
+ GD_EMAIL = 8;
+ GEORSS_POINT = 9;
+ GD_IM = 10;
+
+ GD_PHONENUMBER = 11;
+ GD_POSTALADDRESS = 12;
+
+ GD_RATING = 13;
+
+ BLOBKEY = 17;
+ ENTITY_PROTO = 19;
+
+ INDEX_VALUE = 18;
+ };
+
+ optional Meaning meaning = 1 [default = NO_MEANING];
+ optional string meaning_uri = 2;
+
+ required string name = 3;
+
+ required PropertyValue value = 5;
+
+ required bool multiple = 4;
+
+ optional bool searchable = 6 [default=false];
+
+ enum FtsTokenizationOption {
+ HTML = 1;
+ ATOM = 2;
+ }
+
+ optional FtsTokenizationOption fts_tokenization_option = 8;
+
+ optional string locale = 9 [default = "en"];
+}
+
+message Path {
+ repeated group Element = 1 {
+ required string type = 2;
+ optional int64 id = 3;
+ optional string name = 4;
+ }
+}
+
+message Reference {
+ required string app = 13;
+ optional string name_space = 20;
+ required Path path = 14;
+}
+
+message User {
+ required string email = 1;
+ required string auth_domain = 2;
+ optional string nickname = 3;
+ optional string federated_identity = 6;
+ optional string federated_provider = 7;
+}
+
+message EntityProto {
+ required Reference key = 13;
+ required Path entity_group = 16;
+ optional User owner = 17;
+
+ enum Kind {
+ GD_CONTACT = 1;
+ GD_EVENT = 2;
+ GD_MESSAGE = 3;
+ }
+ optional Kind kind = 4;
+ optional string kind_uri = 5;
+
+ repeated Property property = 14;
+ repeated Property raw_property = 15;
+
+ optional int32 rank = 18;
+}
+
+message CompositeProperty {
+ required int64 index_id = 1;
+ repeated string value = 2;
+}
+
+message Index {
+ required string entity_type = 1;
+ required bool ancestor = 5;
+ repeated group Property = 2 {
+ required string name = 3;
+ enum Direction {
+ ASCENDING = 1;
+ DESCENDING = 2;
+ }
+ optional Direction direction = 4 [default = ASCENDING];
+ }
+}
+
+message CompositeIndex {
+ required string app_id = 1;
+ required int64 id = 2;
+ required Index definition = 3;
+
+ enum State {
+ WRITE_ONLY = 1;
+ READ_WRITE = 2;
+ DELETED = 3;
+ ERROR = 4;
+ }
+ required State state = 4;
+
+ optional bool only_use_if_required = 6 [default = false];
+}
+
+message IndexPostfix {
+ message IndexValue {
+ required string property_name = 1;
+ required PropertyValue value = 2;
+ }
+
+ repeated IndexValue index_value = 1;
+
+ optional Reference key = 2;
+
+ optional bool before = 3 [default=true];
+}
+
+message IndexPosition {
+ optional string key = 1;
+
+ optional bool before = 2 [default=true];
+}
+
+message Snapshot {
+ enum Status {
+ INACTIVE = 0;
+ ACTIVE = 1;
+ }
+
+ required int64 ts = 1;
+}
+
+message InternalHeader {
+ optional string qos = 1;
+}
+
+message Transaction {
+ optional InternalHeader header = 4;
+ required fixed64 handle = 1;
+ required string app = 2;
+ optional bool mark_changes = 3 [default = false];
+}
+
+message Query {
+ optional InternalHeader header = 39;
+
+ required string app = 1;
+ optional string name_space = 29;
+
+ optional string kind = 3;
+ optional Reference ancestor = 17;
+
+ repeated group Filter = 4 {
+ enum Operator {
+ LESS_THAN = 1;
+ LESS_THAN_OR_EQUAL = 2;
+ GREATER_THAN = 3;
+ GREATER_THAN_OR_EQUAL = 4;
+ EQUAL = 5;
+ IN = 6;
+ EXISTS = 7;
+ }
+
+ required Operator op = 6;
+ repeated Property property = 14;
+ }
+
+ optional string search_query = 8;
+
+ repeated group Order = 9 {
+ enum Direction {
+ ASCENDING = 1;
+ DESCENDING = 2;
+ }
+
+ required string property = 10;
+ optional Direction direction = 11 [default = ASCENDING];
+ }
+
+ enum Hint {
+ ORDER_FIRST = 1;
+ ANCESTOR_FIRST = 2;
+ FILTER_FIRST = 3;
+ }
+ optional Hint hint = 18;
+
+ optional int32 count = 23;
+
+ optional int32 offset = 12 [default = 0];
+
+ optional int32 limit = 16;
+
+ optional CompiledCursor compiled_cursor = 30;
+ optional CompiledCursor end_compiled_cursor = 31;
+
+ repeated CompositeIndex composite_index = 19;
+
+ optional bool require_perfect_plan = 20 [default = false];
+
+ optional bool keys_only = 21 [default = false];
+
+ optional Transaction transaction = 22;
+
+ optional bool compile = 25 [default = false];
+
+ optional int64 failover_ms = 26;
+
+ optional bool strong = 32;
+
+ repeated string property_name = 33;
+
+ repeated string group_by_property_name = 34;
+
+ optional bool distinct = 24;
+
+ optional int64 min_safe_time_seconds = 35;
+
+ repeated string safe_replica_name = 36;
+
+ optional bool persist_offset = 37 [default=false];
+}
+
+message CompiledQuery {
+ required group PrimaryScan = 1 {
+ optional string index_name = 2;
+
+ optional string start_key = 3;
+ optional bool start_inclusive = 4;
+ optional string end_key = 5;
+ optional bool end_inclusive = 6;
+
+ repeated string start_postfix_value = 22;
+ repeated string end_postfix_value = 23;
+
+ optional int64 end_unapplied_log_timestamp_us = 19;
+ }
+
+ repeated group MergeJoinScan = 7 {
+ required string index_name = 8;
+
+ repeated string prefix_value = 9;
+
+ optional bool value_prefix = 20 [default=false];
+ }
+
+ optional Index index_def = 21;
+
+ optional int32 offset = 10 [default = 0];
+
+ optional int32 limit = 11;
+
+ required bool keys_only = 12;
+
+ repeated string property_name = 24;
+
+ optional int32 distinct_infix_size = 25;
+
+ optional group EntityFilter = 13 {
+ optional bool distinct = 14 [default=false];
+
+ optional string kind = 17;
+ optional Reference ancestor = 18;
+ }
+}
+
+message CompiledCursor {
+ optional group Position = 2 {
+ optional string start_key = 27;
+
+ repeated group IndexValue = 29 {
+ optional string property = 30;
+ required PropertyValue value = 31;
+ }
+
+ optional Reference key = 32;
+
+ optional bool start_inclusive = 28 [default=true];
+ }
+}
+
+message Cursor {
+ required fixed64 cursor = 1;
+
+ optional string app = 2;
+}
+
+message Error {
+ enum ErrorCode {
+ BAD_REQUEST = 1;
+ CONCURRENT_TRANSACTION = 2;
+ INTERNAL_ERROR = 3;
+ NEED_INDEX = 4;
+ TIMEOUT = 5;
+ PERMISSION_DENIED = 6;
+ BIGTABLE_ERROR = 7;
+ COMMITTED_BUT_STILL_APPLYING = 8;
+ CAPABILITY_DISABLED = 9;
+ TRY_ALTERNATE_BACKEND = 10;
+ SAFE_TIME_TOO_OLD = 11;
+ }
+}
+
+message Cost {
+ optional int32 index_writes = 1;
+ optional int32 index_write_bytes = 2;
+ optional int32 entity_writes = 3;
+ optional int32 entity_write_bytes = 4;
+ optional group CommitCost = 5 {
+ optional int32 requested_entity_puts = 6;
+ optional int32 requested_entity_deletes = 7;
+ };
+ optional int32 approximate_storage_delta = 8;
+ optional int32 id_sequence_updates = 9;
+}
+
+message GetRequest {
+ optional InternalHeader header = 6;
+
+ repeated Reference key = 1;
+ optional Transaction transaction = 2;
+
+ optional int64 failover_ms = 3;
+
+ optional bool strong = 4;
+
+ optional bool allow_deferred = 5 [default=false];
+}
+
+message GetResponse {
+ repeated group Entity = 1 {
+ optional EntityProto entity = 2;
+ optional Reference key = 4;
+
+ optional int64 version = 3;
+ }
+
+ repeated Reference deferred = 5;
+
+ optional bool in_order = 6 [default=true];
+}
+
+message PutRequest {
+ optional InternalHeader header = 11;
+
+ repeated EntityProto entity = 1;
+ optional Transaction transaction = 2;
+ repeated CompositeIndex composite_index = 3;
+
+ optional bool trusted = 4 [default = false];
+
+ optional bool force = 7 [default = false];
+
+ optional bool mark_changes = 8 [default = false];
+ repeated Snapshot snapshot = 9;
+
+ enum AutoIdPolicy {
+ CURRENT = 0;
+ SEQUENTIAL = 1;
+ }
+ optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
+}
+
+message PutResponse {
+ repeated Reference key = 1;
+ optional Cost cost = 2;
+ repeated int64 version = 3;
+}
+
+message TouchRequest {
+ optional InternalHeader header = 10;
+
+ repeated Reference key = 1;
+ repeated CompositeIndex composite_index = 2;
+ optional bool force = 3 [default = false];
+ repeated Snapshot snapshot = 9;
+}
+
+message TouchResponse {
+ optional Cost cost = 1;
+}
+
+message DeleteRequest {
+ optional InternalHeader header = 10;
+
+ repeated Reference key = 6;
+ optional Transaction transaction = 5;
+
+ optional bool trusted = 4 [default = false];
+
+ optional bool force = 7 [default = false];
+
+ optional bool mark_changes = 8 [default = false];
+ repeated Snapshot snapshot = 9;
+}
+
+message DeleteResponse {
+ optional Cost cost = 1;
+ repeated int64 version = 3;
+}
+
+message NextRequest {
+ optional InternalHeader header = 5;
+
+ required Cursor cursor = 1;
+ optional int32 count = 2;
+
+ optional int32 offset = 4 [default = 0];
+
+ optional bool compile = 3 [default = false];
+}
+
+message QueryResult {
+ optional Cursor cursor = 1;
+
+ repeated EntityProto result = 2;
+
+ optional int32 skipped_results = 7;
+
+ required bool more_results = 3;
+
+ optional bool keys_only = 4;
+
+ optional bool index_only = 9;
+
+ optional bool small_ops = 10;
+
+ optional CompiledQuery compiled_query = 5;
+
+ optional CompiledCursor compiled_cursor = 6;
+
+ repeated CompositeIndex index = 8;
+
+ repeated int64 version = 11;
+}
+
+message AllocateIdsRequest {
+ optional InternalHeader header = 4;
+
+ optional Reference model_key = 1;
+
+ optional int64 size = 2;
+
+ optional int64 max = 3;
+
+ repeated Reference reserve = 5;
+}
+
+message AllocateIdsResponse {
+ required int64 start = 1;
+ required int64 end = 2;
+ optional Cost cost = 3;
+}
+
+message CompositeIndices {
+ repeated CompositeIndex index = 1;
+}
+
+message AddActionsRequest {
+ optional InternalHeader header = 3;
+
+ required Transaction transaction = 1;
+ repeated Action action = 2;
+}
+
+message AddActionsResponse {
+}
+
+message BeginTransactionRequest {
+ optional InternalHeader header = 3;
+
+ required string app = 1;
+ optional bool allow_multiple_eg = 2 [default = false];
+}
+
+message CommitResponse {
+ optional Cost cost = 1;
+
+ repeated group Version = 3 {
+ required Reference root_entity_key = 4;
+ required int64 version = 5;
+ }
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go
new file mode 100644
index 000000000..d538701ab
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity.go
@@ -0,0 +1,14 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import netcontext "golang.org/x/net/context"
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
+
+func AppID(c netcontext.Context) string {
+ return appID(FullyQualifiedAppID(c))
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go
new file mode 100644
index 000000000..680c63a6a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_classic.go
@@ -0,0 +1,23 @@
+// +build appengine
+
+package internal
+
+import (
+ "appengine"
+
+ netcontext "golang.org/x/net/context"
+)
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+ return appengine.DefaultVersionHostname(fromContext(ctx))
+}
+
+func RequestID(ctx netcontext.Context) string { return appengine.RequestID(fromContext(ctx)) }
+func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
+func ServerSoftware() string { return appengine.ServerSoftware() }
+func ModuleName(ctx netcontext.Context) string { return appengine.ModuleName(fromContext(ctx)) }
+func VersionID(ctx netcontext.Context) string { return appengine.VersionID(fromContext(ctx)) }
+func InstanceID() string { return appengine.InstanceID() }
+func IsDevAppServer() bool { return appengine.IsDevAppServer() }
+
+func fullyQualifiedAppID(ctx netcontext.Context) string { return fromContext(ctx).FullyQualifiedAppID() }
diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go
new file mode 100644
index 000000000..f01119730
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_vm.go
@@ -0,0 +1,97 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "net/http"
+ "os"
+
+ netcontext "golang.org/x/net/context"
+)
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
+
+const (
+ hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
+ hRequestLogId = "X-AppEngine-Request-Log-Id"
+ hDatacenter = "X-AppEngine-Datacenter"
+)
+
+func ctxHeaders(ctx netcontext.Context) http.Header {
+ return fromContext(ctx).Request().Header
+}
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+ return ctxHeaders(ctx).Get(hDefaultVersionHostname)
+}
+
+func RequestID(ctx netcontext.Context) string {
+ return ctxHeaders(ctx).Get(hRequestLogId)
+}
+
+func Datacenter(ctx netcontext.Context) string {
+ return ctxHeaders(ctx).Get(hDatacenter)
+}
+
+func ServerSoftware() string {
+ // TODO(dsymonds): Remove fallback when we've verified this.
+ if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
+ return s
+ }
+ return "Google App Engine/1.x.x"
+}
+
+// TODO(dsymonds): Remove the metadata fetches.
+
+func ModuleName(_ netcontext.Context) string {
+ if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
+ return s
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_name"))
+}
+
+func VersionID(_ netcontext.Context) string {
+ if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
+ return s1 + "." + s2
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_minor_version"))
+}
+
+func InstanceID() string {
+ if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
+ return s
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
+}
+
+func partitionlessAppID() string {
+ // gae_project has everything except the partition prefix.
+ appID := os.Getenv("GAE_LONG_APP_ID")
+ if appID == "" {
+ appID = string(mustGetMetadata("instance/attributes/gae_project"))
+ }
+ return appID
+}
+
+func fullyQualifiedAppID(_ netcontext.Context) string {
+ appID := partitionlessAppID()
+
+ part := os.Getenv("GAE_PARTITION")
+ if part == "" {
+ part = string(mustGetMetadata("instance/attributes/gae_partition"))
+ }
+
+ if part != "" {
+ appID = part + "~" + appID
+ }
+ return appID
+}
+
+func IsDevAppServer() bool {
+ return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
+}
diff --git a/vendor/google.golang.org/appengine/internal/image/images_service.pb.go b/vendor/google.golang.org/appengine/internal/image/images_service.pb.go
new file mode 100644
index 000000000..27cf17d0c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/image/images_service.pb.go
@@ -0,0 +1,843 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/image/images_service.proto
+// DO NOT EDIT!
+
+/*
+Package image is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/image/images_service.proto
+
+It has these top-level messages:
+ ImagesServiceError
+ ImagesServiceTransform
+ Transform
+ ImageData
+ InputSettings
+ OutputSettings
+ ImagesTransformRequest
+ ImagesTransformResponse
+ CompositeImageOptions
+ ImagesCanvas
+ ImagesCompositeRequest
+ ImagesCompositeResponse
+ ImagesHistogramRequest
+ ImagesHistogram
+ ImagesHistogramResponse
+ ImagesGetUrlBaseRequest
+ ImagesGetUrlBaseResponse
+ ImagesDeleteUrlBaseRequest
+ ImagesDeleteUrlBaseResponse
+*/
+package image
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type ImagesServiceError_ErrorCode int32
+
+const (
+ ImagesServiceError_UNSPECIFIED_ERROR ImagesServiceError_ErrorCode = 1
+ ImagesServiceError_BAD_TRANSFORM_DATA ImagesServiceError_ErrorCode = 2
+ ImagesServiceError_NOT_IMAGE ImagesServiceError_ErrorCode = 3
+ ImagesServiceError_BAD_IMAGE_DATA ImagesServiceError_ErrorCode = 4
+ ImagesServiceError_IMAGE_TOO_LARGE ImagesServiceError_ErrorCode = 5
+ ImagesServiceError_INVALID_BLOB_KEY ImagesServiceError_ErrorCode = 6
+ ImagesServiceError_ACCESS_DENIED ImagesServiceError_ErrorCode = 7
+ ImagesServiceError_OBJECT_NOT_FOUND ImagesServiceError_ErrorCode = 8
+)
+
+var ImagesServiceError_ErrorCode_name = map[int32]string{
+ 1: "UNSPECIFIED_ERROR",
+ 2: "BAD_TRANSFORM_DATA",
+ 3: "NOT_IMAGE",
+ 4: "BAD_IMAGE_DATA",
+ 5: "IMAGE_TOO_LARGE",
+ 6: "INVALID_BLOB_KEY",
+ 7: "ACCESS_DENIED",
+ 8: "OBJECT_NOT_FOUND",
+}
+var ImagesServiceError_ErrorCode_value = map[string]int32{
+ "UNSPECIFIED_ERROR": 1,
+ "BAD_TRANSFORM_DATA": 2,
+ "NOT_IMAGE": 3,
+ "BAD_IMAGE_DATA": 4,
+ "IMAGE_TOO_LARGE": 5,
+ "INVALID_BLOB_KEY": 6,
+ "ACCESS_DENIED": 7,
+ "OBJECT_NOT_FOUND": 8,
+}
+
+func (x ImagesServiceError_ErrorCode) Enum() *ImagesServiceError_ErrorCode {
+ p := new(ImagesServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x ImagesServiceError_ErrorCode) String() string {
+ return proto.EnumName(ImagesServiceError_ErrorCode_name, int32(x))
+}
+func (x *ImagesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ImagesServiceError_ErrorCode_value, data, "ImagesServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = ImagesServiceError_ErrorCode(value)
+ return nil
+}
+
+type ImagesServiceTransform_Type int32
+
+const (
+ ImagesServiceTransform_RESIZE ImagesServiceTransform_Type = 1
+ ImagesServiceTransform_ROTATE ImagesServiceTransform_Type = 2
+ ImagesServiceTransform_HORIZONTAL_FLIP ImagesServiceTransform_Type = 3
+ ImagesServiceTransform_VERTICAL_FLIP ImagesServiceTransform_Type = 4
+ ImagesServiceTransform_CROP ImagesServiceTransform_Type = 5
+ ImagesServiceTransform_IM_FEELING_LUCKY ImagesServiceTransform_Type = 6
+)
+
+var ImagesServiceTransform_Type_name = map[int32]string{
+ 1: "RESIZE",
+ 2: "ROTATE",
+ 3: "HORIZONTAL_FLIP",
+ 4: "VERTICAL_FLIP",
+ 5: "CROP",
+ 6: "IM_FEELING_LUCKY",
+}
+var ImagesServiceTransform_Type_value = map[string]int32{
+ "RESIZE": 1,
+ "ROTATE": 2,
+ "HORIZONTAL_FLIP": 3,
+ "VERTICAL_FLIP": 4,
+ "CROP": 5,
+ "IM_FEELING_LUCKY": 6,
+}
+
+func (x ImagesServiceTransform_Type) Enum() *ImagesServiceTransform_Type {
+ p := new(ImagesServiceTransform_Type)
+ *p = x
+ return p
+}
+func (x ImagesServiceTransform_Type) String() string {
+ return proto.EnumName(ImagesServiceTransform_Type_name, int32(x))
+}
+func (x *ImagesServiceTransform_Type) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ImagesServiceTransform_Type_value, data, "ImagesServiceTransform_Type")
+ if err != nil {
+ return err
+ }
+ *x = ImagesServiceTransform_Type(value)
+ return nil
+}
+
+type InputSettings_ORIENTATION_CORRECTION_TYPE int32
+
+const (
+ InputSettings_UNCHANGED_ORIENTATION InputSettings_ORIENTATION_CORRECTION_TYPE = 0
+ InputSettings_CORRECT_ORIENTATION InputSettings_ORIENTATION_CORRECTION_TYPE = 1
+)
+
+var InputSettings_ORIENTATION_CORRECTION_TYPE_name = map[int32]string{
+ 0: "UNCHANGED_ORIENTATION",
+ 1: "CORRECT_ORIENTATION",
+}
+var InputSettings_ORIENTATION_CORRECTION_TYPE_value = map[string]int32{
+ "UNCHANGED_ORIENTATION": 0,
+ "CORRECT_ORIENTATION": 1,
+}
+
+func (x InputSettings_ORIENTATION_CORRECTION_TYPE) Enum() *InputSettings_ORIENTATION_CORRECTION_TYPE {
+ p := new(InputSettings_ORIENTATION_CORRECTION_TYPE)
+ *p = x
+ return p
+}
+func (x InputSettings_ORIENTATION_CORRECTION_TYPE) String() string {
+ return proto.EnumName(InputSettings_ORIENTATION_CORRECTION_TYPE_name, int32(x))
+}
+func (x *InputSettings_ORIENTATION_CORRECTION_TYPE) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(InputSettings_ORIENTATION_CORRECTION_TYPE_value, data, "InputSettings_ORIENTATION_CORRECTION_TYPE")
+ if err != nil {
+ return err
+ }
+ *x = InputSettings_ORIENTATION_CORRECTION_TYPE(value)
+ return nil
+}
+
+type OutputSettings_MIME_TYPE int32
+
+const (
+ OutputSettings_PNG OutputSettings_MIME_TYPE = 0
+ OutputSettings_JPEG OutputSettings_MIME_TYPE = 1
+ OutputSettings_WEBP OutputSettings_MIME_TYPE = 2
+)
+
+var OutputSettings_MIME_TYPE_name = map[int32]string{
+ 0: "PNG",
+ 1: "JPEG",
+ 2: "WEBP",
+}
+var OutputSettings_MIME_TYPE_value = map[string]int32{
+ "PNG": 0,
+ "JPEG": 1,
+ "WEBP": 2,
+}
+
+func (x OutputSettings_MIME_TYPE) Enum() *OutputSettings_MIME_TYPE {
+ p := new(OutputSettings_MIME_TYPE)
+ *p = x
+ return p
+}
+func (x OutputSettings_MIME_TYPE) String() string {
+ return proto.EnumName(OutputSettings_MIME_TYPE_name, int32(x))
+}
+func (x *OutputSettings_MIME_TYPE) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(OutputSettings_MIME_TYPE_value, data, "OutputSettings_MIME_TYPE")
+ if err != nil {
+ return err
+ }
+ *x = OutputSettings_MIME_TYPE(value)
+ return nil
+}
+
+type CompositeImageOptions_ANCHOR int32
+
+const (
+ CompositeImageOptions_TOP_LEFT CompositeImageOptions_ANCHOR = 0
+ CompositeImageOptions_TOP CompositeImageOptions_ANCHOR = 1
+ CompositeImageOptions_TOP_RIGHT CompositeImageOptions_ANCHOR = 2
+ CompositeImageOptions_LEFT CompositeImageOptions_ANCHOR = 3
+ CompositeImageOptions_CENTER CompositeImageOptions_ANCHOR = 4
+ CompositeImageOptions_RIGHT CompositeImageOptions_ANCHOR = 5
+ CompositeImageOptions_BOTTOM_LEFT CompositeImageOptions_ANCHOR = 6
+ CompositeImageOptions_BOTTOM CompositeImageOptions_ANCHOR = 7
+ CompositeImageOptions_BOTTOM_RIGHT CompositeImageOptions_ANCHOR = 8
+)
+
+var CompositeImageOptions_ANCHOR_name = map[int32]string{
+ 0: "TOP_LEFT",
+ 1: "TOP",
+ 2: "TOP_RIGHT",
+ 3: "LEFT",
+ 4: "CENTER",
+ 5: "RIGHT",
+ 6: "BOTTOM_LEFT",
+ 7: "BOTTOM",
+ 8: "BOTTOM_RIGHT",
+}
+var CompositeImageOptions_ANCHOR_value = map[string]int32{
+ "TOP_LEFT": 0,
+ "TOP": 1,
+ "TOP_RIGHT": 2,
+ "LEFT": 3,
+ "CENTER": 4,
+ "RIGHT": 5,
+ "BOTTOM_LEFT": 6,
+ "BOTTOM": 7,
+ "BOTTOM_RIGHT": 8,
+}
+
+func (x CompositeImageOptions_ANCHOR) Enum() *CompositeImageOptions_ANCHOR {
+ p := new(CompositeImageOptions_ANCHOR)
+ *p = x
+ return p
+}
+func (x CompositeImageOptions_ANCHOR) String() string {
+ return proto.EnumName(CompositeImageOptions_ANCHOR_name, int32(x))
+}
+func (x *CompositeImageOptions_ANCHOR) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CompositeImageOptions_ANCHOR_value, data, "CompositeImageOptions_ANCHOR")
+ if err != nil {
+ return err
+ }
+ *x = CompositeImageOptions_ANCHOR(value)
+ return nil
+}
+
+type ImagesServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesServiceError) Reset() { *m = ImagesServiceError{} }
+func (m *ImagesServiceError) String() string { return proto.CompactTextString(m) }
+func (*ImagesServiceError) ProtoMessage() {}
+
+type ImagesServiceTransform struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesServiceTransform) Reset() { *m = ImagesServiceTransform{} }
+func (m *ImagesServiceTransform) String() string { return proto.CompactTextString(m) }
+func (*ImagesServiceTransform) ProtoMessage() {}
+
+type Transform struct {
+ Width *int32 `protobuf:"varint,1,opt,name=width" json:"width,omitempty"`
+ Height *int32 `protobuf:"varint,2,opt,name=height" json:"height,omitempty"`
+ CropToFit *bool `protobuf:"varint,11,opt,name=crop_to_fit,def=0" json:"crop_to_fit,omitempty"`
+ CropOffsetX *float32 `protobuf:"fixed32,12,opt,name=crop_offset_x,def=0.5" json:"crop_offset_x,omitempty"`
+ CropOffsetY *float32 `protobuf:"fixed32,13,opt,name=crop_offset_y,def=0.5" json:"crop_offset_y,omitempty"`
+ Rotate *int32 `protobuf:"varint,3,opt,name=rotate,def=0" json:"rotate,omitempty"`
+ HorizontalFlip *bool `protobuf:"varint,4,opt,name=horizontal_flip,def=0" json:"horizontal_flip,omitempty"`
+ VerticalFlip *bool `protobuf:"varint,5,opt,name=vertical_flip,def=0" json:"vertical_flip,omitempty"`
+ CropLeftX *float32 `protobuf:"fixed32,6,opt,name=crop_left_x,def=0" json:"crop_left_x,omitempty"`
+ CropTopY *float32 `protobuf:"fixed32,7,opt,name=crop_top_y,def=0" json:"crop_top_y,omitempty"`
+ CropRightX *float32 `protobuf:"fixed32,8,opt,name=crop_right_x,def=1" json:"crop_right_x,omitempty"`
+ CropBottomY *float32 `protobuf:"fixed32,9,opt,name=crop_bottom_y,def=1" json:"crop_bottom_y,omitempty"`
+ Autolevels *bool `protobuf:"varint,10,opt,name=autolevels,def=0" json:"autolevels,omitempty"`
+ AllowStretch *bool `protobuf:"varint,14,opt,name=allow_stretch,def=0" json:"allow_stretch,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Transform) Reset() { *m = Transform{} }
+func (m *Transform) String() string { return proto.CompactTextString(m) }
+func (*Transform) ProtoMessage() {}
+
+const Default_Transform_CropToFit bool = false
+const Default_Transform_CropOffsetX float32 = 0.5
+const Default_Transform_CropOffsetY float32 = 0.5
+const Default_Transform_Rotate int32 = 0
+const Default_Transform_HorizontalFlip bool = false
+const Default_Transform_VerticalFlip bool = false
+const Default_Transform_CropLeftX float32 = 0
+const Default_Transform_CropTopY float32 = 0
+const Default_Transform_CropRightX float32 = 1
+const Default_Transform_CropBottomY float32 = 1
+const Default_Transform_Autolevels bool = false
+const Default_Transform_AllowStretch bool = false
+
+func (m *Transform) GetWidth() int32 {
+ if m != nil && m.Width != nil {
+ return *m.Width
+ }
+ return 0
+}
+
+func (m *Transform) GetHeight() int32 {
+ if m != nil && m.Height != nil {
+ return *m.Height
+ }
+ return 0
+}
+
+func (m *Transform) GetCropToFit() bool {
+ if m != nil && m.CropToFit != nil {
+ return *m.CropToFit
+ }
+ return Default_Transform_CropToFit
+}
+
+func (m *Transform) GetCropOffsetX() float32 {
+ if m != nil && m.CropOffsetX != nil {
+ return *m.CropOffsetX
+ }
+ return Default_Transform_CropOffsetX
+}
+
+func (m *Transform) GetCropOffsetY() float32 {
+ if m != nil && m.CropOffsetY != nil {
+ return *m.CropOffsetY
+ }
+ return Default_Transform_CropOffsetY
+}
+
+func (m *Transform) GetRotate() int32 {
+ if m != nil && m.Rotate != nil {
+ return *m.Rotate
+ }
+ return Default_Transform_Rotate
+}
+
+func (m *Transform) GetHorizontalFlip() bool {
+ if m != nil && m.HorizontalFlip != nil {
+ return *m.HorizontalFlip
+ }
+ return Default_Transform_HorizontalFlip
+}
+
+func (m *Transform) GetVerticalFlip() bool {
+ if m != nil && m.VerticalFlip != nil {
+ return *m.VerticalFlip
+ }
+ return Default_Transform_VerticalFlip
+}
+
+func (m *Transform) GetCropLeftX() float32 {
+ if m != nil && m.CropLeftX != nil {
+ return *m.CropLeftX
+ }
+ return Default_Transform_CropLeftX
+}
+
+func (m *Transform) GetCropTopY() float32 {
+ if m != nil && m.CropTopY != nil {
+ return *m.CropTopY
+ }
+ return Default_Transform_CropTopY
+}
+
+func (m *Transform) GetCropRightX() float32 {
+ if m != nil && m.CropRightX != nil {
+ return *m.CropRightX
+ }
+ return Default_Transform_CropRightX
+}
+
+func (m *Transform) GetCropBottomY() float32 {
+ if m != nil && m.CropBottomY != nil {
+ return *m.CropBottomY
+ }
+ return Default_Transform_CropBottomY
+}
+
+func (m *Transform) GetAutolevels() bool {
+ if m != nil && m.Autolevels != nil {
+ return *m.Autolevels
+ }
+ return Default_Transform_Autolevels
+}
+
+func (m *Transform) GetAllowStretch() bool {
+ if m != nil && m.AllowStretch != nil {
+ return *m.AllowStretch
+ }
+ return Default_Transform_AllowStretch
+}
+
+type ImageData struct {
+ Content []byte `protobuf:"bytes,1,req,name=content" json:"content,omitempty"`
+ BlobKey *string `protobuf:"bytes,2,opt,name=blob_key" json:"blob_key,omitempty"`
+ Width *int32 `protobuf:"varint,3,opt,name=width" json:"width,omitempty"`
+ Height *int32 `protobuf:"varint,4,opt,name=height" json:"height,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImageData) Reset() { *m = ImageData{} }
+func (m *ImageData) String() string { return proto.CompactTextString(m) }
+func (*ImageData) ProtoMessage() {}
+
+func (m *ImageData) GetContent() []byte {
+ if m != nil {
+ return m.Content
+ }
+ return nil
+}
+
+func (m *ImageData) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+func (m *ImageData) GetWidth() int32 {
+ if m != nil && m.Width != nil {
+ return *m.Width
+ }
+ return 0
+}
+
+func (m *ImageData) GetHeight() int32 {
+ if m != nil && m.Height != nil {
+ return *m.Height
+ }
+ return 0
+}
+
+type InputSettings struct {
+ CorrectExifOrientation *InputSettings_ORIENTATION_CORRECTION_TYPE `protobuf:"varint,1,opt,name=correct_exif_orientation,enum=appengine.InputSettings_ORIENTATION_CORRECTION_TYPE,def=0" json:"correct_exif_orientation,omitempty"`
+ ParseMetadata *bool `protobuf:"varint,2,opt,name=parse_metadata,def=0" json:"parse_metadata,omitempty"`
+ TransparentSubstitutionRgb *int32 `protobuf:"varint,3,opt,name=transparent_substitution_rgb" json:"transparent_substitution_rgb,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *InputSettings) Reset() { *m = InputSettings{} }
+func (m *InputSettings) String() string { return proto.CompactTextString(m) }
+func (*InputSettings) ProtoMessage() {}
+
+const Default_InputSettings_CorrectExifOrientation InputSettings_ORIENTATION_CORRECTION_TYPE = InputSettings_UNCHANGED_ORIENTATION
+const Default_InputSettings_ParseMetadata bool = false
+
+func (m *InputSettings) GetCorrectExifOrientation() InputSettings_ORIENTATION_CORRECTION_TYPE {
+ if m != nil && m.CorrectExifOrientation != nil {
+ return *m.CorrectExifOrientation
+ }
+ return Default_InputSettings_CorrectExifOrientation
+}
+
+func (m *InputSettings) GetParseMetadata() bool {
+ if m != nil && m.ParseMetadata != nil {
+ return *m.ParseMetadata
+ }
+ return Default_InputSettings_ParseMetadata
+}
+
+func (m *InputSettings) GetTransparentSubstitutionRgb() int32 {
+ if m != nil && m.TransparentSubstitutionRgb != nil {
+ return *m.TransparentSubstitutionRgb
+ }
+ return 0
+}
+
+type OutputSettings struct {
+ MimeType *OutputSettings_MIME_TYPE `protobuf:"varint,1,opt,name=mime_type,enum=appengine.OutputSettings_MIME_TYPE,def=0" json:"mime_type,omitempty"`
+ Quality *int32 `protobuf:"varint,2,opt,name=quality" json:"quality,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OutputSettings) Reset() { *m = OutputSettings{} }
+func (m *OutputSettings) String() string { return proto.CompactTextString(m) }
+func (*OutputSettings) ProtoMessage() {}
+
+const Default_OutputSettings_MimeType OutputSettings_MIME_TYPE = OutputSettings_PNG
+
+func (m *OutputSettings) GetMimeType() OutputSettings_MIME_TYPE {
+ if m != nil && m.MimeType != nil {
+ return *m.MimeType
+ }
+ return Default_OutputSettings_MimeType
+}
+
+func (m *OutputSettings) GetQuality() int32 {
+ if m != nil && m.Quality != nil {
+ return *m.Quality
+ }
+ return 0
+}
+
+type ImagesTransformRequest struct {
+ Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"`
+ Transform []*Transform `protobuf:"bytes,2,rep,name=transform" json:"transform,omitempty"`
+ Output *OutputSettings `protobuf:"bytes,3,req,name=output" json:"output,omitempty"`
+ Input *InputSettings `protobuf:"bytes,4,opt,name=input" json:"input,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesTransformRequest) Reset() { *m = ImagesTransformRequest{} }
+func (m *ImagesTransformRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesTransformRequest) ProtoMessage() {}
+
+func (m *ImagesTransformRequest) GetImage() *ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+func (m *ImagesTransformRequest) GetTransform() []*Transform {
+ if m != nil {
+ return m.Transform
+ }
+ return nil
+}
+
+func (m *ImagesTransformRequest) GetOutput() *OutputSettings {
+ if m != nil {
+ return m.Output
+ }
+ return nil
+}
+
+func (m *ImagesTransformRequest) GetInput() *InputSettings {
+ if m != nil {
+ return m.Input
+ }
+ return nil
+}
+
+type ImagesTransformResponse struct {
+ Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"`
+ SourceMetadata *string `protobuf:"bytes,2,opt,name=source_metadata" json:"source_metadata,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesTransformResponse) Reset() { *m = ImagesTransformResponse{} }
+func (m *ImagesTransformResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesTransformResponse) ProtoMessage() {}
+
+func (m *ImagesTransformResponse) GetImage() *ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+func (m *ImagesTransformResponse) GetSourceMetadata() string {
+ if m != nil && m.SourceMetadata != nil {
+ return *m.SourceMetadata
+ }
+ return ""
+}
+
+type CompositeImageOptions struct {
+ SourceIndex *int32 `protobuf:"varint,1,req,name=source_index" json:"source_index,omitempty"`
+ XOffset *int32 `protobuf:"varint,2,req,name=x_offset" json:"x_offset,omitempty"`
+ YOffset *int32 `protobuf:"varint,3,req,name=y_offset" json:"y_offset,omitempty"`
+ Opacity *float32 `protobuf:"fixed32,4,req,name=opacity" json:"opacity,omitempty"`
+ Anchor *CompositeImageOptions_ANCHOR `protobuf:"varint,5,req,name=anchor,enum=appengine.CompositeImageOptions_ANCHOR" json:"anchor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeImageOptions) Reset() { *m = CompositeImageOptions{} }
+func (m *CompositeImageOptions) String() string { return proto.CompactTextString(m) }
+func (*CompositeImageOptions) ProtoMessage() {}
+
+func (m *CompositeImageOptions) GetSourceIndex() int32 {
+ if m != nil && m.SourceIndex != nil {
+ return *m.SourceIndex
+ }
+ return 0
+}
+
+func (m *CompositeImageOptions) GetXOffset() int32 {
+ if m != nil && m.XOffset != nil {
+ return *m.XOffset
+ }
+ return 0
+}
+
+func (m *CompositeImageOptions) GetYOffset() int32 {
+ if m != nil && m.YOffset != nil {
+ return *m.YOffset
+ }
+ return 0
+}
+
+func (m *CompositeImageOptions) GetOpacity() float32 {
+ if m != nil && m.Opacity != nil {
+ return *m.Opacity
+ }
+ return 0
+}
+
+func (m *CompositeImageOptions) GetAnchor() CompositeImageOptions_ANCHOR {
+ if m != nil && m.Anchor != nil {
+ return *m.Anchor
+ }
+ return CompositeImageOptions_TOP_LEFT
+}
+
+type ImagesCanvas struct {
+ Width *int32 `protobuf:"varint,1,req,name=width" json:"width,omitempty"`
+ Height *int32 `protobuf:"varint,2,req,name=height" json:"height,omitempty"`
+ Output *OutputSettings `protobuf:"bytes,3,req,name=output" json:"output,omitempty"`
+ Color *int32 `protobuf:"varint,4,opt,name=color,def=-1" json:"color,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesCanvas) Reset() { *m = ImagesCanvas{} }
+func (m *ImagesCanvas) String() string { return proto.CompactTextString(m) }
+func (*ImagesCanvas) ProtoMessage() {}
+
+const Default_ImagesCanvas_Color int32 = -1
+
+func (m *ImagesCanvas) GetWidth() int32 {
+ if m != nil && m.Width != nil {
+ return *m.Width
+ }
+ return 0
+}
+
+func (m *ImagesCanvas) GetHeight() int32 {
+ if m != nil && m.Height != nil {
+ return *m.Height
+ }
+ return 0
+}
+
+func (m *ImagesCanvas) GetOutput() *OutputSettings {
+ if m != nil {
+ return m.Output
+ }
+ return nil
+}
+
+func (m *ImagesCanvas) GetColor() int32 {
+ if m != nil && m.Color != nil {
+ return *m.Color
+ }
+ return Default_ImagesCanvas_Color
+}
+
+type ImagesCompositeRequest struct {
+ Image []*ImageData `protobuf:"bytes,1,rep,name=image" json:"image,omitempty"`
+ Options []*CompositeImageOptions `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"`
+ Canvas *ImagesCanvas `protobuf:"bytes,3,req,name=canvas" json:"canvas,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesCompositeRequest) Reset() { *m = ImagesCompositeRequest{} }
+func (m *ImagesCompositeRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesCompositeRequest) ProtoMessage() {}
+
+func (m *ImagesCompositeRequest) GetImage() []*ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+func (m *ImagesCompositeRequest) GetOptions() []*CompositeImageOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+func (m *ImagesCompositeRequest) GetCanvas() *ImagesCanvas {
+ if m != nil {
+ return m.Canvas
+ }
+ return nil
+}
+
+type ImagesCompositeResponse struct {
+ Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesCompositeResponse) Reset() { *m = ImagesCompositeResponse{} }
+func (m *ImagesCompositeResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesCompositeResponse) ProtoMessage() {}
+
+func (m *ImagesCompositeResponse) GetImage() *ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+type ImagesHistogramRequest struct {
+ Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesHistogramRequest) Reset() { *m = ImagesHistogramRequest{} }
+func (m *ImagesHistogramRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesHistogramRequest) ProtoMessage() {}
+
+func (m *ImagesHistogramRequest) GetImage() *ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+type ImagesHistogram struct {
+ Red []int32 `protobuf:"varint,1,rep,name=red" json:"red,omitempty"`
+ Green []int32 `protobuf:"varint,2,rep,name=green" json:"green,omitempty"`
+ Blue []int32 `protobuf:"varint,3,rep,name=blue" json:"blue,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesHistogram) Reset() { *m = ImagesHistogram{} }
+func (m *ImagesHistogram) String() string { return proto.CompactTextString(m) }
+func (*ImagesHistogram) ProtoMessage() {}
+
+func (m *ImagesHistogram) GetRed() []int32 {
+ if m != nil {
+ return m.Red
+ }
+ return nil
+}
+
+func (m *ImagesHistogram) GetGreen() []int32 {
+ if m != nil {
+ return m.Green
+ }
+ return nil
+}
+
+func (m *ImagesHistogram) GetBlue() []int32 {
+ if m != nil {
+ return m.Blue
+ }
+ return nil
+}
+
+type ImagesHistogramResponse struct {
+ Histogram *ImagesHistogram `protobuf:"bytes,1,req,name=histogram" json:"histogram,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesHistogramResponse) Reset() { *m = ImagesHistogramResponse{} }
+func (m *ImagesHistogramResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesHistogramResponse) ProtoMessage() {}
+
+func (m *ImagesHistogramResponse) GetHistogram() *ImagesHistogram {
+ if m != nil {
+ return m.Histogram
+ }
+ return nil
+}
+
+type ImagesGetUrlBaseRequest struct {
+ BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ CreateSecureUrl *bool `protobuf:"varint,2,opt,name=create_secure_url,def=0" json:"create_secure_url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesGetUrlBaseRequest) Reset() { *m = ImagesGetUrlBaseRequest{} }
+func (m *ImagesGetUrlBaseRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesGetUrlBaseRequest) ProtoMessage() {}
+
+const Default_ImagesGetUrlBaseRequest_CreateSecureUrl bool = false
+
+func (m *ImagesGetUrlBaseRequest) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+func (m *ImagesGetUrlBaseRequest) GetCreateSecureUrl() bool {
+ if m != nil && m.CreateSecureUrl != nil {
+ return *m.CreateSecureUrl
+ }
+ return Default_ImagesGetUrlBaseRequest_CreateSecureUrl
+}
+
+type ImagesGetUrlBaseResponse struct {
+ Url *string `protobuf:"bytes,1,req,name=url" json:"url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesGetUrlBaseResponse) Reset() { *m = ImagesGetUrlBaseResponse{} }
+func (m *ImagesGetUrlBaseResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesGetUrlBaseResponse) ProtoMessage() {}
+
+func (m *ImagesGetUrlBaseResponse) GetUrl() string {
+ if m != nil && m.Url != nil {
+ return *m.Url
+ }
+ return ""
+}
+
+type ImagesDeleteUrlBaseRequest struct {
+ BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesDeleteUrlBaseRequest) Reset() { *m = ImagesDeleteUrlBaseRequest{} }
+func (m *ImagesDeleteUrlBaseRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesDeleteUrlBaseRequest) ProtoMessage() {}
+
+func (m *ImagesDeleteUrlBaseRequest) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+type ImagesDeleteUrlBaseResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesDeleteUrlBaseResponse) Reset() { *m = ImagesDeleteUrlBaseResponse{} }
+func (m *ImagesDeleteUrlBaseResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesDeleteUrlBaseResponse) ProtoMessage() {}
+
+func init() {
+}
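
Every getter in the generated code above guards against a nil receiver before dereferencing, so calls on an unset message are safe and fall back to the proto2 defaults (for example, GetMimeType on a nil *OutputSettings yields OutputSettings_PNG). A minimal sketch of that behaviour, written as a hypothetical test that would sit alongside the generated image package; it is not part of this change:

package image

import "testing"

// Hypothetical test (not part of the vendored files): the generated getters
// are nil-safe and return the proto2 defaults declared in images_service.proto.
func TestNilSafeGetters(t *testing.T) {
	var out *OutputSettings // never allocated
	if got := out.GetMimeType(); got != OutputSettings_PNG {
		t.Errorf("GetMimeType() = %v, want default OutputSettings_PNG", got)
	}
	if got := out.GetQuality(); got != 0 {
		t.Errorf("GetQuality() = %d, want 0 (no default declared)", got)
	}
	var in *InputSettings
	if in.GetParseMetadata() {
		t.Error("GetParseMetadata() on a nil message should return the default false")
	}
}
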
diff --git a/vendor/google.golang.org/appengine/internal/image/images_service.proto b/vendor/google.golang.org/appengine/internal/image/images_service.proto
new file mode 100644
index 000000000..f0d2ed5d3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/image/images_service.proto
@@ -0,0 +1,162 @@
+syntax = "proto2";
+option go_package = "image";
+
+package appengine;
+
+message ImagesServiceError {
+ enum ErrorCode {
+ UNSPECIFIED_ERROR = 1;
+ BAD_TRANSFORM_DATA = 2;
+ NOT_IMAGE = 3;
+ BAD_IMAGE_DATA = 4;
+ IMAGE_TOO_LARGE = 5;
+ INVALID_BLOB_KEY = 6;
+ ACCESS_DENIED = 7;
+ OBJECT_NOT_FOUND = 8;
+ }
+}
+
+message ImagesServiceTransform {
+ enum Type {
+ RESIZE = 1;
+ ROTATE = 2;
+ HORIZONTAL_FLIP = 3;
+ VERTICAL_FLIP = 4;
+ CROP = 5;
+ IM_FEELING_LUCKY = 6;
+ }
+}
+
+message Transform {
+ optional int32 width = 1;
+ optional int32 height = 2;
+ optional bool crop_to_fit = 11 [default = false];
+ optional float crop_offset_x = 12 [default = 0.5];
+ optional float crop_offset_y = 13 [default = 0.5];
+
+ optional int32 rotate = 3 [default = 0];
+
+ optional bool horizontal_flip = 4 [default = false];
+
+ optional bool vertical_flip = 5 [default = false];
+
+ optional float crop_left_x = 6 [default = 0.0];
+ optional float crop_top_y = 7 [default = 0.0];
+ optional float crop_right_x = 8 [default = 1.0];
+ optional float crop_bottom_y = 9 [default = 1.0];
+
+ optional bool autolevels = 10 [default = false];
+
+ optional bool allow_stretch = 14 [default = false];
+}
+
+message ImageData {
+ required bytes content = 1 [ctype=CORD];
+ optional string blob_key = 2;
+
+ optional int32 width = 3;
+ optional int32 height = 4;
+}
+
+message InputSettings {
+ enum ORIENTATION_CORRECTION_TYPE {
+ UNCHANGED_ORIENTATION = 0;
+ CORRECT_ORIENTATION = 1;
+ }
+ optional ORIENTATION_CORRECTION_TYPE correct_exif_orientation = 1
+ [default=UNCHANGED_ORIENTATION];
+ optional bool parse_metadata = 2 [default=false];
+ optional int32 transparent_substitution_rgb = 3;
+}
+
+message OutputSettings {
+ enum MIME_TYPE {
+ PNG = 0;
+ JPEG = 1;
+ WEBP = 2;
+ }
+
+ optional MIME_TYPE mime_type = 1 [default=PNG];
+ optional int32 quality = 2;
+}
+
+message ImagesTransformRequest {
+ required ImageData image = 1;
+ repeated Transform transform = 2;
+ required OutputSettings output = 3;
+ optional InputSettings input = 4;
+}
+
+message ImagesTransformResponse {
+ required ImageData image = 1;
+ optional string source_metadata = 2;
+}
+
+message CompositeImageOptions {
+ required int32 source_index = 1;
+ required int32 x_offset = 2;
+ required int32 y_offset = 3;
+ required float opacity = 4;
+
+ enum ANCHOR {
+ TOP_LEFT = 0;
+ TOP = 1;
+ TOP_RIGHT = 2;
+ LEFT = 3;
+ CENTER = 4;
+ RIGHT = 5;
+ BOTTOM_LEFT = 6;
+ BOTTOM = 7;
+ BOTTOM_RIGHT = 8;
+ }
+
+ required ANCHOR anchor = 5;
+}
+
+message ImagesCanvas {
+ required int32 width = 1;
+ required int32 height = 2;
+ required OutputSettings output = 3;
+ optional int32 color = 4 [default=-1];
+}
+
+message ImagesCompositeRequest {
+ repeated ImageData image = 1;
+ repeated CompositeImageOptions options = 2;
+ required ImagesCanvas canvas = 3;
+}
+
+message ImagesCompositeResponse {
+ required ImageData image = 1;
+}
+
+message ImagesHistogramRequest {
+ required ImageData image = 1;
+}
+
+message ImagesHistogram {
+ repeated int32 red = 1;
+ repeated int32 green = 2;
+ repeated int32 blue = 3;
+}
+
+message ImagesHistogramResponse {
+ required ImagesHistogram histogram = 1;
+}
+
+message ImagesGetUrlBaseRequest {
+ required string blob_key = 1;
+
+ optional bool create_secure_url = 2 [default = false];
+}
+
+message ImagesGetUrlBaseResponse {
+ required string url = 1;
+}
+
+message ImagesDeleteUrlBaseRequest {
+ required string blob_key = 1;
+}
+
+message ImagesDeleteUrlBaseResponse {
+}
diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go
new file mode 100644
index 000000000..914145b5b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/internal.go
@@ -0,0 +1,161 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package internal provides support for package appengine.
+//
+// Programs should not use this package directly. Its API is not stable.
+// Use packages appengine and appengine/* instead.
+package internal
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "net/url"
+ "os"
+
+ "github.com/golang/protobuf/proto"
+
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+// errorCodeMaps is a map of service name to the error code map for the service.
+var errorCodeMaps = make(map[string]map[int32]string)
+
+// RegisterErrorCodeMap is called from API implementations to register their
+// error code map. This should only be called from init functions.
+func RegisterErrorCodeMap(service string, m map[int32]string) {
+ errorCodeMaps[service] = m
+}
+
+type timeoutCodeKey struct {
+ service string
+ code int32
+}
+
+// timeoutCodes is the set of service+code pairs that represent timeouts.
+var timeoutCodes = make(map[timeoutCodeKey]bool)
+
+func RegisterTimeoutErrorCode(service string, code int32) {
+ timeoutCodes[timeoutCodeKey{service, code}] = true
+}
+
+// APIError is the type returned by appengine.Context's Call method
+// when an API call fails in an API-specific way. This may be, for instance,
+// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
+type APIError struct {
+ Service string
+ Detail string
+ Code int32 // API-specific error code
+}
+
+func (e *APIError) Error() string {
+ if e.Code == 0 {
+ if e.Detail == "" {
+ return "APIError <empty>"
+ }
+ return e.Detail
+ }
+ s := fmt.Sprintf("API error %d", e.Code)
+ if m, ok := errorCodeMaps[e.Service]; ok {
+ s += " (" + e.Service + ": " + m[e.Code] + ")"
+ } else {
+ // Shouldn't happen, but provide a bit more detail if it does.
+ s = e.Service + " " + s
+ }
+ if e.Detail != "" {
+ s += ": " + e.Detail
+ }
+ return s
+}
+
+func (e *APIError) IsTimeout() bool {
+ return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
+}
+
+// CallError is the type returned by appengine.Context's Call method when an
+// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
+type CallError struct {
+ Detail string
+ Code int32
+ // TODO: Remove this if we get a distinguishable error code.
+ Timeout bool
+}
+
+func (e *CallError) Error() string {
+ var msg string
+ switch remotepb.RpcError_ErrorCode(e.Code) {
+ case remotepb.RpcError_UNKNOWN:
+ return e.Detail
+ case remotepb.RpcError_OVER_QUOTA:
+ msg = "Over quota"
+ case remotepb.RpcError_CAPABILITY_DISABLED:
+ msg = "Capability disabled"
+ case remotepb.RpcError_CANCELLED:
+ msg = "Canceled"
+ default:
+ msg = fmt.Sprintf("Call error %d", e.Code)
+ }
+ s := msg + ": " + e.Detail
+ if e.Timeout {
+ s += " (timeout)"
+ }
+ return s
+}
+
+func (e *CallError) IsTimeout() bool {
+ return e.Timeout
+}
+
+// Main is designed so that the complete generated main package is:
+//
+// package main
+//
+// import (
+// "google.golang.org/appengine/internal"
+//
+// _ "myapp/package0"
+// _ "myapp/package1"
+// )
+//
+// func main() {
+// internal.Main()
+// }
+//
+// The "myapp/packageX" packages are expected to register HTTP handlers
+// in their init functions.
+func Main() {
+ installHealthChecker(http.DefaultServeMux)
+
+ port := "8080"
+ if s := os.Getenv("PORT"); s != "" {
+ port = s
+ }
+
+ if err := http.ListenAndServe(":"+port, http.HandlerFunc(handleHTTP)); err != nil {
+ log.Fatalf("http.ListenAndServe: %v", err)
+ }
+}
+
+func installHealthChecker(mux *http.ServeMux) {
+ // If no health check handler has been installed by this point, add a trivial one.
+ const healthPath = "/_ah/health"
+ hreq := &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Path: healthPath,
+ },
+ }
+ if _, pat := mux.Handler(hreq); pat != healthPath {
+ mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, "ok")
+ })
+ }
+}
+
+// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
+// The function should be prepared to be called on the same message more than once; it should only modify the
+// RPC request the first time.
+var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
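
RegisterErrorCodeMap and APIError are designed to work together: a service package registers its generated ErrorCode name map from an init function, and APIError.Error then prints the symbolic name next to the numeric code. The sketch below shows roughly how a package inside the appengine tree would wire this up for the images service; the package name and the "images" service string are illustrative assumptions, not taken from this change:

// Sketch only: hooking generated error-code names into APIError's formatting.
// Packages outside the appengine tree cannot import .../appengine/internal,
// and the "images" service string here is an assumption.
package images

import (
	"google.golang.org/appengine/internal"

	pb "google.golang.org/appengine/internal/image"
)

func init() {
	// Make APIError.Error print symbolic names for this service's codes.
	internal.RegisterErrorCodeMap("images", pb.ImagesServiceError_ErrorCode_name)
}

func describe(code int32, detail string) string {
	err := &internal.APIError{Service: "images", Code: code, Detail: detail}
	// code=2, detail="bad crop" renders as:
	//   API error 2 (images: BAD_TRANSFORM_DATA): bad crop
	return err.Error()
}
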
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
new file mode 100644
index 000000000..ebb2bab09
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
@@ -0,0 +1,897 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/log/log_service.proto
+// DO NOT EDIT!
+
+/*
+Package log is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/log/log_service.proto
+
+It has these top-level messages:
+ LogServiceError
+ UserAppLogLine
+ UserAppLogGroup
+ FlushRequest
+ SetStatusRequest
+ LogOffset
+ LogLine
+ RequestLog
+ LogModuleVersion
+ LogReadRequest
+ LogReadResponse
+ LogUsageRecord
+ LogUsageRequest
+ LogUsageResponse
+*/
+package log
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type LogServiceError_ErrorCode int32
+
+const (
+ LogServiceError_OK LogServiceError_ErrorCode = 0
+ LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1
+ LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2
+)
+
+var LogServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_REQUEST",
+ 2: "STORAGE_ERROR",
+}
+var LogServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_REQUEST": 1,
+ "STORAGE_ERROR": 2,
+}
+
+func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode {
+ p := new(LogServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x LogServiceError_ErrorCode) String() string {
+ return proto.EnumName(LogServiceError_ErrorCode_name, int32(x))
+}
+func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = LogServiceError_ErrorCode(value)
+ return nil
+}
+
+type LogServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogServiceError) Reset() { *m = LogServiceError{} }
+func (m *LogServiceError) String() string { return proto.CompactTextString(m) }
+func (*LogServiceError) ProtoMessage() {}
+
+type UserAppLogLine struct {
+ TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec" json:"timestamp_usec,omitempty"`
+ Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
+ Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} }
+func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) }
+func (*UserAppLogLine) ProtoMessage() {}
+
+func (m *UserAppLogLine) GetTimestampUsec() int64 {
+ if m != nil && m.TimestampUsec != nil {
+ return *m.TimestampUsec
+ }
+ return 0
+}
+
+func (m *UserAppLogLine) GetLevel() int64 {
+ if m != nil && m.Level != nil {
+ return *m.Level
+ }
+ return 0
+}
+
+func (m *UserAppLogLine) GetMessage() string {
+ if m != nil && m.Message != nil {
+ return *m.Message
+ }
+ return ""
+}
+
+type UserAppLogGroup struct {
+ LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line" json:"log_line,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} }
+func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) }
+func (*UserAppLogGroup) ProtoMessage() {}
+
+func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine {
+ if m != nil {
+ return m.LogLine
+ }
+ return nil
+}
+
+type FlushRequest struct {
+ Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FlushRequest) Reset() { *m = FlushRequest{} }
+func (m *FlushRequest) String() string { return proto.CompactTextString(m) }
+func (*FlushRequest) ProtoMessage() {}
+
+func (m *FlushRequest) GetLogs() []byte {
+ if m != nil {
+ return m.Logs
+ }
+ return nil
+}
+
+type SetStatusRequest struct {
+ Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} }
+func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) }
+func (*SetStatusRequest) ProtoMessage() {}
+
+func (m *SetStatusRequest) GetStatus() string {
+ if m != nil && m.Status != nil {
+ return *m.Status
+ }
+ return ""
+}
+
+type LogOffset struct {
+ RequestId []byte `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogOffset) Reset() { *m = LogOffset{} }
+func (m *LogOffset) String() string { return proto.CompactTextString(m) }
+func (*LogOffset) ProtoMessage() {}
+
+func (m *LogOffset) GetRequestId() []byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+type LogLine struct {
+ Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"`
+ Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
+ LogMessage *string `protobuf:"bytes,3,req,name=log_message" json:"log_message,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogLine) Reset() { *m = LogLine{} }
+func (m *LogLine) String() string { return proto.CompactTextString(m) }
+func (*LogLine) ProtoMessage() {}
+
+func (m *LogLine) GetTime() int64 {
+ if m != nil && m.Time != nil {
+ return *m.Time
+ }
+ return 0
+}
+
+func (m *LogLine) GetLevel() int32 {
+ if m != nil && m.Level != nil {
+ return *m.Level
+ }
+ return 0
+}
+
+func (m *LogLine) GetLogMessage() string {
+ if m != nil && m.LogMessage != nil {
+ return *m.LogMessage
+ }
+ return ""
+}
+
+type RequestLog struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ ModuleId *string `protobuf:"bytes,37,opt,name=module_id,def=default" json:"module_id,omitempty"`
+ VersionId *string `protobuf:"bytes,2,req,name=version_id" json:"version_id,omitempty"`
+ RequestId []byte `protobuf:"bytes,3,req,name=request_id" json:"request_id,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"`
+ Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"`
+ Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"`
+ StartTime *int64 `protobuf:"varint,6,req,name=start_time" json:"start_time,omitempty"`
+ EndTime *int64 `protobuf:"varint,7,req,name=end_time" json:"end_time,omitempty"`
+ Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"`
+ Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"`
+ Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"`
+ Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"`
+ HttpVersion *string `protobuf:"bytes,12,req,name=http_version" json:"http_version,omitempty"`
+ Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"`
+ ResponseSize *int64 `protobuf:"varint,14,req,name=response_size" json:"response_size,omitempty"`
+ Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"`
+ UserAgent *string `protobuf:"bytes,16,opt,name=user_agent" json:"user_agent,omitempty"`
+ UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry" json:"url_map_entry,omitempty"`
+ Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"`
+ ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles" json:"api_mcycles,omitempty"`
+ Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"`
+ Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"`
+ TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name" json:"task_queue_name,omitempty"`
+ TaskName *string `protobuf:"bytes,23,opt,name=task_name" json:"task_name,omitempty"`
+ WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request" json:"was_loading_request,omitempty"`
+ PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time" json:"pending_time,omitempty"`
+ ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,def=-1" json:"replica_index,omitempty"`
+ Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"`
+ CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key" json:"clone_key,omitempty"`
+ Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"`
+ LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete" json:"lines_incomplete,omitempty"`
+ AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release" json:"app_engine_release,omitempty"`
+ ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason" json:"exit_reason,omitempty"`
+ WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time" json:"was_throttled_for_time,omitempty"`
+ WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests" json:"was_throttled_for_requests,omitempty"`
+ ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time" json:"throttled_time,omitempty"`
+ ServerName []byte `protobuf:"bytes,34,opt,name=server_name" json:"server_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RequestLog) Reset() { *m = RequestLog{} }
+func (m *RequestLog) String() string { return proto.CompactTextString(m) }
+func (*RequestLog) ProtoMessage() {}
+
+const Default_RequestLog_ModuleId string = "default"
+const Default_RequestLog_ReplicaIndex int32 = -1
+const Default_RequestLog_Finished bool = true
+
+func (m *RequestLog) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *RequestLog) GetModuleId() string {
+ if m != nil && m.ModuleId != nil {
+ return *m.ModuleId
+ }
+ return Default_RequestLog_ModuleId
+}
+
+func (m *RequestLog) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+func (m *RequestLog) GetRequestId() []byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+func (m *RequestLog) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *RequestLog) GetIp() string {
+ if m != nil && m.Ip != nil {
+ return *m.Ip
+ }
+ return ""
+}
+
+func (m *RequestLog) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *RequestLog) GetStartTime() int64 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetEndTime() int64 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetLatency() int64 {
+ if m != nil && m.Latency != nil {
+ return *m.Latency
+ }
+ return 0
+}
+
+func (m *RequestLog) GetMcycles() int64 {
+ if m != nil && m.Mcycles != nil {
+ return *m.Mcycles
+ }
+ return 0
+}
+
+func (m *RequestLog) GetMethod() string {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return ""
+}
+
+func (m *RequestLog) GetResource() string {
+ if m != nil && m.Resource != nil {
+ return *m.Resource
+ }
+ return ""
+}
+
+func (m *RequestLog) GetHttpVersion() string {
+ if m != nil && m.HttpVersion != nil {
+ return *m.HttpVersion
+ }
+ return ""
+}
+
+func (m *RequestLog) GetStatus() int32 {
+ if m != nil && m.Status != nil {
+ return *m.Status
+ }
+ return 0
+}
+
+func (m *RequestLog) GetResponseSize() int64 {
+ if m != nil && m.ResponseSize != nil {
+ return *m.ResponseSize
+ }
+ return 0
+}
+
+func (m *RequestLog) GetReferrer() string {
+ if m != nil && m.Referrer != nil {
+ return *m.Referrer
+ }
+ return ""
+}
+
+func (m *RequestLog) GetUserAgent() string {
+ if m != nil && m.UserAgent != nil {
+ return *m.UserAgent
+ }
+ return ""
+}
+
+func (m *RequestLog) GetUrlMapEntry() string {
+ if m != nil && m.UrlMapEntry != nil {
+ return *m.UrlMapEntry
+ }
+ return ""
+}
+
+func (m *RequestLog) GetCombined() string {
+ if m != nil && m.Combined != nil {
+ return *m.Combined
+ }
+ return ""
+}
+
+func (m *RequestLog) GetApiMcycles() int64 {
+ if m != nil && m.ApiMcycles != nil {
+ return *m.ApiMcycles
+ }
+ return 0
+}
+
+func (m *RequestLog) GetHost() string {
+ if m != nil && m.Host != nil {
+ return *m.Host
+ }
+ return ""
+}
+
+func (m *RequestLog) GetCost() float64 {
+ if m != nil && m.Cost != nil {
+ return *m.Cost
+ }
+ return 0
+}
+
+func (m *RequestLog) GetTaskQueueName() string {
+ if m != nil && m.TaskQueueName != nil {
+ return *m.TaskQueueName
+ }
+ return ""
+}
+
+func (m *RequestLog) GetTaskName() string {
+ if m != nil && m.TaskName != nil {
+ return *m.TaskName
+ }
+ return ""
+}
+
+func (m *RequestLog) GetWasLoadingRequest() bool {
+ if m != nil && m.WasLoadingRequest != nil {
+ return *m.WasLoadingRequest
+ }
+ return false
+}
+
+func (m *RequestLog) GetPendingTime() int64 {
+ if m != nil && m.PendingTime != nil {
+ return *m.PendingTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetReplicaIndex() int32 {
+ if m != nil && m.ReplicaIndex != nil {
+ return *m.ReplicaIndex
+ }
+ return Default_RequestLog_ReplicaIndex
+}
+
+func (m *RequestLog) GetFinished() bool {
+ if m != nil && m.Finished != nil {
+ return *m.Finished
+ }
+ return Default_RequestLog_Finished
+}
+
+func (m *RequestLog) GetCloneKey() []byte {
+ if m != nil {
+ return m.CloneKey
+ }
+ return nil
+}
+
+func (m *RequestLog) GetLine() []*LogLine {
+ if m != nil {
+ return m.Line
+ }
+ return nil
+}
+
+func (m *RequestLog) GetLinesIncomplete() bool {
+ if m != nil && m.LinesIncomplete != nil {
+ return *m.LinesIncomplete
+ }
+ return false
+}
+
+func (m *RequestLog) GetAppEngineRelease() []byte {
+ if m != nil {
+ return m.AppEngineRelease
+ }
+ return nil
+}
+
+func (m *RequestLog) GetExitReason() int32 {
+ if m != nil && m.ExitReason != nil {
+ return *m.ExitReason
+ }
+ return 0
+}
+
+func (m *RequestLog) GetWasThrottledForTime() bool {
+ if m != nil && m.WasThrottledForTime != nil {
+ return *m.WasThrottledForTime
+ }
+ return false
+}
+
+func (m *RequestLog) GetWasThrottledForRequests() bool {
+ if m != nil && m.WasThrottledForRequests != nil {
+ return *m.WasThrottledForRequests
+ }
+ return false
+}
+
+func (m *RequestLog) GetThrottledTime() int64 {
+ if m != nil && m.ThrottledTime != nil {
+ return *m.ThrottledTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetServerName() []byte {
+ if m != nil {
+ return m.ServerName
+ }
+ return nil
+}
+
+type LogModuleVersion struct {
+ ModuleId *string `protobuf:"bytes,1,opt,name=module_id,def=default" json:"module_id,omitempty"`
+ VersionId *string `protobuf:"bytes,2,opt,name=version_id" json:"version_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} }
+func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) }
+func (*LogModuleVersion) ProtoMessage() {}
+
+const Default_LogModuleVersion_ModuleId string = "default"
+
+func (m *LogModuleVersion) GetModuleId() string {
+ if m != nil && m.ModuleId != nil {
+ return *m.ModuleId
+ }
+ return Default_LogModuleVersion_ModuleId
+}
+
+func (m *LogModuleVersion) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+type LogReadRequest struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"`
+ ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version" json:"module_version,omitempty"`
+ StartTime *int64 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"`
+ EndTime *int64 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"`
+ RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id" json:"request_id,omitempty"`
+ MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level" json:"minimum_log_level,omitempty"`
+ IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete" json:"include_incomplete,omitempty"`
+ Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"`
+ CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex" json:"combined_log_regex,omitempty"`
+ HostRegex *string `protobuf:"bytes,15,opt,name=host_regex" json:"host_regex,omitempty"`
+ ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index" json:"replica_index,omitempty"`
+ IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs" json:"include_app_logs,omitempty"`
+ AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request" json:"app_logs_per_request,omitempty"`
+ IncludeHost *bool `protobuf:"varint,11,opt,name=include_host" json:"include_host,omitempty"`
+ IncludeAll *bool `protobuf:"varint,12,opt,name=include_all" json:"include_all,omitempty"`
+ CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator" json:"cache_iterator,omitempty"`
+ NumShards *int32 `protobuf:"varint,18,opt,name=num_shards" json:"num_shards,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogReadRequest) Reset() { *m = LogReadRequest{} }
+func (m *LogReadRequest) String() string { return proto.CompactTextString(m) }
+func (*LogReadRequest) ProtoMessage() {}
+
+func (m *LogReadRequest) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetVersionId() []string {
+ if m != nil {
+ return m.VersionId
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion {
+ if m != nil {
+ return m.ModuleVersion
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetStartTime() int64 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetEndTime() int64 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetRequestId() [][]byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetMinimumLogLevel() int32 {
+ if m != nil && m.MinimumLogLevel != nil {
+ return *m.MinimumLogLevel
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeIncomplete() bool {
+ if m != nil && m.IncludeIncomplete != nil {
+ return *m.IncludeIncomplete
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetCount() int64 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetCombinedLogRegex() string {
+ if m != nil && m.CombinedLogRegex != nil {
+ return *m.CombinedLogRegex
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetHostRegex() string {
+ if m != nil && m.HostRegex != nil {
+ return *m.HostRegex
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetReplicaIndex() int32 {
+ if m != nil && m.ReplicaIndex != nil {
+ return *m.ReplicaIndex
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeAppLogs() bool {
+ if m != nil && m.IncludeAppLogs != nil {
+ return *m.IncludeAppLogs
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetAppLogsPerRequest() int32 {
+ if m != nil && m.AppLogsPerRequest != nil {
+ return *m.AppLogsPerRequest
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeHost() bool {
+ if m != nil && m.IncludeHost != nil {
+ return *m.IncludeHost
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetIncludeAll() bool {
+ if m != nil && m.IncludeAll != nil {
+ return *m.IncludeAll
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetCacheIterator() bool {
+ if m != nil && m.CacheIterator != nil {
+ return *m.CacheIterator
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetNumShards() int32 {
+ if m != nil && m.NumShards != nil {
+ return *m.NumShards
+ }
+ return 0
+}
+
+type LogReadResponse struct {
+ Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"`
+ LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time" json:"last_end_time,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogReadResponse) Reset() { *m = LogReadResponse{} }
+func (m *LogReadResponse) String() string { return proto.CompactTextString(m) }
+func (*LogReadResponse) ProtoMessage() {}
+
+func (m *LogReadResponse) GetLog() []*RequestLog {
+ if m != nil {
+ return m.Log
+ }
+ return nil
+}
+
+func (m *LogReadResponse) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *LogReadResponse) GetLastEndTime() int64 {
+ if m != nil && m.LastEndTime != nil {
+ return *m.LastEndTime
+ }
+ return 0
+}
+
+type LogUsageRecord struct {
+ VersionId *string `protobuf:"bytes,1,opt,name=version_id" json:"version_id,omitempty"`
+ StartTime *int32 `protobuf:"varint,2,opt,name=start_time" json:"start_time,omitempty"`
+ EndTime *int32 `protobuf:"varint,3,opt,name=end_time" json:"end_time,omitempty"`
+ Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
+ TotalSize *int64 `protobuf:"varint,5,opt,name=total_size" json:"total_size,omitempty"`
+ Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} }
+func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) }
+func (*LogUsageRecord) ProtoMessage() {}
+
+func (m *LogUsageRecord) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+func (m *LogUsageRecord) GetStartTime() int32 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetEndTime() int32 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetCount() int64 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetTotalSize() int64 {
+ if m != nil && m.TotalSize != nil {
+ return *m.TotalSize
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetRecords() int32 {
+ if m != nil && m.Records != nil {
+ return *m.Records
+ }
+ return 0
+}
+
+type LogUsageRequest struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"`
+ StartTime *int32 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"`
+ EndTime *int32 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"`
+ ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,def=1" json:"resolution_hours,omitempty"`
+ CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions" json:"combine_versions,omitempty"`
+ UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version" json:"usage_version,omitempty"`
+ VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only" json:"versions_only,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} }
+func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) }
+func (*LogUsageRequest) ProtoMessage() {}
+
+const Default_LogUsageRequest_ResolutionHours uint32 = 1
+
+func (m *LogUsageRequest) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *LogUsageRequest) GetVersionId() []string {
+ if m != nil {
+ return m.VersionId
+ }
+ return nil
+}
+
+func (m *LogUsageRequest) GetStartTime() int32 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetEndTime() int32 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetResolutionHours() uint32 {
+ if m != nil && m.ResolutionHours != nil {
+ return *m.ResolutionHours
+ }
+ return Default_LogUsageRequest_ResolutionHours
+}
+
+func (m *LogUsageRequest) GetCombineVersions() bool {
+ if m != nil && m.CombineVersions != nil {
+ return *m.CombineVersions
+ }
+ return false
+}
+
+func (m *LogUsageRequest) GetUsageVersion() int32 {
+ if m != nil && m.UsageVersion != nil {
+ return *m.UsageVersion
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetVersionsOnly() bool {
+ if m != nil && m.VersionsOnly != nil {
+ return *m.VersionsOnly
+ }
+ return false
+}
+
+type LogUsageResponse struct {
+ Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"`
+ Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} }
+func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) }
+func (*LogUsageResponse) ProtoMessage() {}
+
+func (m *LogUsageResponse) GetUsage() []*LogUsageRecord {
+ if m != nil {
+ return m.Usage
+ }
+ return nil
+}
+
+func (m *LogUsageResponse) GetSummary() *LogUsageRecord {
+ if m != nil {
+ return m.Summary
+ }
+ return nil
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto
new file mode 100644
index 000000000..8981dc475
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.proto
@@ -0,0 +1,150 @@
+syntax = "proto2";
+option go_package = "log";
+
+package appengine;
+
+message LogServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_REQUEST = 1;
+ STORAGE_ERROR = 2;
+ }
+}
+
+message UserAppLogLine {
+ required int64 timestamp_usec = 1;
+ required int64 level = 2;
+ required string message = 3;
+}
+
+message UserAppLogGroup {
+ repeated UserAppLogLine log_line = 2;
+}
+
+message FlushRequest {
+ optional bytes logs = 1;
+}
+
+message SetStatusRequest {
+ required string status = 1;
+}
+
+
+message LogOffset {
+ optional bytes request_id = 1;
+}
+
+message LogLine {
+ required int64 time = 1;
+ required int32 level = 2;
+ required string log_message = 3;
+}
+
+message RequestLog {
+ required string app_id = 1;
+ optional string module_id = 37 [default="default"];
+ required string version_id = 2;
+ required bytes request_id = 3;
+ optional LogOffset offset = 35;
+ required string ip = 4;
+ optional string nickname = 5;
+ required int64 start_time = 6;
+ required int64 end_time = 7;
+ required int64 latency = 8;
+ required int64 mcycles = 9;
+ required string method = 10;
+ required string resource = 11;
+ required string http_version = 12;
+ required int32 status = 13;
+ required int64 response_size = 14;
+ optional string referrer = 15;
+ optional string user_agent = 16;
+ required string url_map_entry = 17;
+ required string combined = 18;
+ optional int64 api_mcycles = 19;
+ optional string host = 20;
+ optional double cost = 21;
+
+ optional string task_queue_name = 22;
+ optional string task_name = 23;
+
+ optional bool was_loading_request = 24;
+ optional int64 pending_time = 25;
+ optional int32 replica_index = 26 [default = -1];
+ optional bool finished = 27 [default = true];
+ optional bytes clone_key = 28;
+
+ repeated LogLine line = 29;
+
+ optional bool lines_incomplete = 36;
+ optional bytes app_engine_release = 38;
+
+ optional int32 exit_reason = 30;
+ optional bool was_throttled_for_time = 31;
+ optional bool was_throttled_for_requests = 32;
+ optional int64 throttled_time = 33;
+
+ optional bytes server_name = 34;
+}
+
+message LogModuleVersion {
+ optional string module_id = 1 [default="default"];
+ optional string version_id = 2;
+}
+
+message LogReadRequest {
+ required string app_id = 1;
+ repeated string version_id = 2;
+ repeated LogModuleVersion module_version = 19;
+
+ optional int64 start_time = 3;
+ optional int64 end_time = 4;
+ optional LogOffset offset = 5;
+ repeated bytes request_id = 6;
+
+ optional int32 minimum_log_level = 7;
+ optional bool include_incomplete = 8;
+ optional int64 count = 9;
+
+ optional string combined_log_regex = 14;
+ optional string host_regex = 15;
+ optional int32 replica_index = 16;
+
+ optional bool include_app_logs = 10;
+ optional int32 app_logs_per_request = 17;
+ optional bool include_host = 11;
+ optional bool include_all = 12;
+ optional bool cache_iterator = 13;
+ optional int32 num_shards = 18;
+}
+
+message LogReadResponse {
+ repeated RequestLog log = 1;
+ optional LogOffset offset = 2;
+ optional int64 last_end_time = 3;
+}
+
+message LogUsageRecord {
+ optional string version_id = 1;
+ optional int32 start_time = 2;
+ optional int32 end_time = 3;
+ optional int64 count = 4;
+ optional int64 total_size = 5;
+ optional int32 records = 6;
+}
+
+message LogUsageRequest {
+ required string app_id = 1;
+ repeated string version_id = 2;
+ optional int32 start_time = 3;
+ optional int32 end_time = 4;
+ optional uint32 resolution_hours = 5 [default = 1];
+ optional bool combine_versions = 6;
+ optional int32 usage_version = 7;
+ optional bool versions_only = 8;
+}
+
+message LogUsageResponse {
+ repeated LogUsageRecord usage = 1;
+ optional LogUsageRecord summary = 2;
+}
diff --git a/vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go b/vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go
new file mode 100644
index 000000000..06a5d51ee
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go
@@ -0,0 +1,227 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/mail/mail_service.proto
+// DO NOT EDIT!
+
+/*
+Package mail is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/mail/mail_service.proto
+
+It has these top-level messages:
+ MailServiceError
+ MailAttachment
+ MailHeader
+ MailMessage
+*/
+package mail
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type MailServiceError_ErrorCode int32
+
+const (
+ MailServiceError_OK MailServiceError_ErrorCode = 0
+ MailServiceError_INTERNAL_ERROR MailServiceError_ErrorCode = 1
+ MailServiceError_BAD_REQUEST MailServiceError_ErrorCode = 2
+ MailServiceError_UNAUTHORIZED_SENDER MailServiceError_ErrorCode = 3
+ MailServiceError_INVALID_ATTACHMENT_TYPE MailServiceError_ErrorCode = 4
+ MailServiceError_INVALID_HEADER_NAME MailServiceError_ErrorCode = 5
+ MailServiceError_INVALID_CONTENT_ID MailServiceError_ErrorCode = 6
+)
+
+var MailServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INTERNAL_ERROR",
+ 2: "BAD_REQUEST",
+ 3: "UNAUTHORIZED_SENDER",
+ 4: "INVALID_ATTACHMENT_TYPE",
+ 5: "INVALID_HEADER_NAME",
+ 6: "INVALID_CONTENT_ID",
+}
+var MailServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INTERNAL_ERROR": 1,
+ "BAD_REQUEST": 2,
+ "UNAUTHORIZED_SENDER": 3,
+ "INVALID_ATTACHMENT_TYPE": 4,
+ "INVALID_HEADER_NAME": 5,
+ "INVALID_CONTENT_ID": 6,
+}
+
+func (x MailServiceError_ErrorCode) Enum() *MailServiceError_ErrorCode {
+ p := new(MailServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x MailServiceError_ErrorCode) String() string {
+ return proto.EnumName(MailServiceError_ErrorCode_name, int32(x))
+}
+func (x *MailServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MailServiceError_ErrorCode_value, data, "MailServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = MailServiceError_ErrorCode(value)
+ return nil
+}
+
+type MailServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MailServiceError) Reset() { *m = MailServiceError{} }
+func (m *MailServiceError) String() string { return proto.CompactTextString(m) }
+func (*MailServiceError) ProtoMessage() {}
+
+type MailAttachment struct {
+ FileName *string `protobuf:"bytes,1,req" json:"FileName,omitempty"`
+ Data []byte `protobuf:"bytes,2,req" json:"Data,omitempty"`
+ ContentID *string `protobuf:"bytes,3,opt" json:"ContentID,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MailAttachment) Reset() { *m = MailAttachment{} }
+func (m *MailAttachment) String() string { return proto.CompactTextString(m) }
+func (*MailAttachment) ProtoMessage() {}
+
+func (m *MailAttachment) GetFileName() string {
+ if m != nil && m.FileName != nil {
+ return *m.FileName
+ }
+ return ""
+}
+
+func (m *MailAttachment) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *MailAttachment) GetContentID() string {
+ if m != nil && m.ContentID != nil {
+ return *m.ContentID
+ }
+ return ""
+}
+
+type MailHeader struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MailHeader) Reset() { *m = MailHeader{} }
+func (m *MailHeader) String() string { return proto.CompactTextString(m) }
+func (*MailHeader) ProtoMessage() {}
+
+func (m *MailHeader) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MailHeader) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type MailMessage struct {
+ Sender *string `protobuf:"bytes,1,req" json:"Sender,omitempty"`
+ ReplyTo *string `protobuf:"bytes,2,opt" json:"ReplyTo,omitempty"`
+ To []string `protobuf:"bytes,3,rep" json:"To,omitempty"`
+ Cc []string `protobuf:"bytes,4,rep" json:"Cc,omitempty"`
+ Bcc []string `protobuf:"bytes,5,rep" json:"Bcc,omitempty"`
+ Subject *string `protobuf:"bytes,6,req" json:"Subject,omitempty"`
+ TextBody *string `protobuf:"bytes,7,opt" json:"TextBody,omitempty"`
+ HtmlBody *string `protobuf:"bytes,8,opt" json:"HtmlBody,omitempty"`
+ Attachment []*MailAttachment `protobuf:"bytes,9,rep" json:"Attachment,omitempty"`
+ Header []*MailHeader `protobuf:"bytes,10,rep" json:"Header,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MailMessage) Reset() { *m = MailMessage{} }
+func (m *MailMessage) String() string { return proto.CompactTextString(m) }
+func (*MailMessage) ProtoMessage() {}
+
+func (m *MailMessage) GetSender() string {
+ if m != nil && m.Sender != nil {
+ return *m.Sender
+ }
+ return ""
+}
+
+func (m *MailMessage) GetReplyTo() string {
+ if m != nil && m.ReplyTo != nil {
+ return *m.ReplyTo
+ }
+ return ""
+}
+
+func (m *MailMessage) GetTo() []string {
+ if m != nil {
+ return m.To
+ }
+ return nil
+}
+
+func (m *MailMessage) GetCc() []string {
+ if m != nil {
+ return m.Cc
+ }
+ return nil
+}
+
+func (m *MailMessage) GetBcc() []string {
+ if m != nil {
+ return m.Bcc
+ }
+ return nil
+}
+
+func (m *MailMessage) GetSubject() string {
+ if m != nil && m.Subject != nil {
+ return *m.Subject
+ }
+ return ""
+}
+
+func (m *MailMessage) GetTextBody() string {
+ if m != nil && m.TextBody != nil {
+ return *m.TextBody
+ }
+ return ""
+}
+
+func (m *MailMessage) GetHtmlBody() string {
+ if m != nil && m.HtmlBody != nil {
+ return *m.HtmlBody
+ }
+ return ""
+}
+
+func (m *MailMessage) GetAttachment() []*MailAttachment {
+ if m != nil {
+ return m.Attachment
+ }
+ return nil
+}
+
+func (m *MailMessage) GetHeader() []*MailHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/mail/mail_service.proto b/vendor/google.golang.org/appengine/internal/mail/mail_service.proto
new file mode 100644
index 000000000..4e57b7aa5
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/mail/mail_service.proto
@@ -0,0 +1,45 @@
+syntax = "proto2";
+option go_package = "mail";
+
+package appengine;
+
+message MailServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INTERNAL_ERROR = 1;
+ BAD_REQUEST = 2;
+ UNAUTHORIZED_SENDER = 3;
+ INVALID_ATTACHMENT_TYPE = 4;
+ INVALID_HEADER_NAME = 5;
+ INVALID_CONTENT_ID = 6;
+ }
+}
+
+message MailAttachment {
+ required string FileName = 1;
+ required bytes Data = 2;
+ optional string ContentID = 3;
+}
+
+message MailHeader {
+ required string name = 1;
+ required string value = 2;
+}
+
+message MailMessage {
+ required string Sender = 1;
+ optional string ReplyTo = 2;
+
+ repeated string To = 3;
+ repeated string Cc = 4;
+ repeated string Bcc = 5;
+
+ required string Subject = 6;
+
+ optional string TextBody = 7;
+ optional string HtmlBody = 8;
+
+ repeated MailAttachment Attachment = 9;
+
+ repeated MailHeader Header = 10;
+}
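
Since every proto2 scalar field maps to a pointer in the generated Go struct, a MailMessage is normally assembled with the proto helper constructors (proto.String and friends) rather than by taking addresses of locals. A minimal sketch with placeholder values; it is not part of the vendored files:

// Minimal sketch (not part of the vendored files): proto.String, from
// github.com/golang/protobuf/proto, returns a *string for the pointer-valued
// proto2 fields; repeated fields are plain slices.
package mail

import proto "github.com/golang/protobuf/proto"

func buildMessage() *MailMessage {
	return &MailMessage{
		Sender:   proto.String("sender@example.com"),
		To:       []string{"rcpt@example.com"},
		Subject:  proto.String("Hello from appengine/mail"),
		TextBody: proto.String("Plain-text body goes here."),
		Header: []*MailHeader{
			{Name: proto.String("List-Id"), Value: proto.String("example")},
		},
	}
}
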
diff --git a/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go
new file mode 100644
index 000000000..d0d1f5f5c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go
@@ -0,0 +1,936 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/memcache/memcache_service.proto
+// DO NOT EDIT!
+
+/*
+Package memcache is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/memcache/memcache_service.proto
+
+It has these top-level messages:
+ MemcacheServiceError
+ AppOverride
+ MemcacheGetRequest
+ MemcacheGetResponse
+ MemcacheSetRequest
+ MemcacheSetResponse
+ MemcacheDeleteRequest
+ MemcacheDeleteResponse
+ MemcacheIncrementRequest
+ MemcacheIncrementResponse
+ MemcacheBatchIncrementRequest
+ MemcacheBatchIncrementResponse
+ MemcacheFlushRequest
+ MemcacheFlushResponse
+ MemcacheStatsRequest
+ MergedNamespaceStats
+ MemcacheStatsResponse
+ MemcacheGrabTailRequest
+ MemcacheGrabTailResponse
+*/
+package memcache
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type MemcacheServiceError_ErrorCode int32
+
+const (
+ MemcacheServiceError_OK MemcacheServiceError_ErrorCode = 0
+ MemcacheServiceError_UNSPECIFIED_ERROR MemcacheServiceError_ErrorCode = 1
+ MemcacheServiceError_NAMESPACE_NOT_SET MemcacheServiceError_ErrorCode = 2
+ MemcacheServiceError_PERMISSION_DENIED MemcacheServiceError_ErrorCode = 3
+ MemcacheServiceError_INVALID_VALUE MemcacheServiceError_ErrorCode = 6
+)
+
+var MemcacheServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "UNSPECIFIED_ERROR",
+ 2: "NAMESPACE_NOT_SET",
+ 3: "PERMISSION_DENIED",
+ 6: "INVALID_VALUE",
+}
+var MemcacheServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "UNSPECIFIED_ERROR": 1,
+ "NAMESPACE_NOT_SET": 2,
+ "PERMISSION_DENIED": 3,
+ "INVALID_VALUE": 6,
+}
+
+func (x MemcacheServiceError_ErrorCode) Enum() *MemcacheServiceError_ErrorCode {
+ p := new(MemcacheServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x MemcacheServiceError_ErrorCode) String() string {
+ return proto.EnumName(MemcacheServiceError_ErrorCode_name, int32(x))
+}
+func (x *MemcacheServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheServiceError_ErrorCode_value, data, "MemcacheServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheServiceError_ErrorCode(value)
+ return nil
+}
+
+type MemcacheSetRequest_SetPolicy int32
+
+const (
+ MemcacheSetRequest_SET MemcacheSetRequest_SetPolicy = 1
+ MemcacheSetRequest_ADD MemcacheSetRequest_SetPolicy = 2
+ MemcacheSetRequest_REPLACE MemcacheSetRequest_SetPolicy = 3
+ MemcacheSetRequest_CAS MemcacheSetRequest_SetPolicy = 4
+)
+
+var MemcacheSetRequest_SetPolicy_name = map[int32]string{
+ 1: "SET",
+ 2: "ADD",
+ 3: "REPLACE",
+ 4: "CAS",
+}
+var MemcacheSetRequest_SetPolicy_value = map[string]int32{
+ "SET": 1,
+ "ADD": 2,
+ "REPLACE": 3,
+ "CAS": 4,
+}
+
+func (x MemcacheSetRequest_SetPolicy) Enum() *MemcacheSetRequest_SetPolicy {
+ p := new(MemcacheSetRequest_SetPolicy)
+ *p = x
+ return p
+}
+func (x MemcacheSetRequest_SetPolicy) String() string {
+ return proto.EnumName(MemcacheSetRequest_SetPolicy_name, int32(x))
+}
+func (x *MemcacheSetRequest_SetPolicy) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheSetRequest_SetPolicy_value, data, "MemcacheSetRequest_SetPolicy")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheSetRequest_SetPolicy(value)
+ return nil
+}
+
+type MemcacheSetResponse_SetStatusCode int32
+
+const (
+ MemcacheSetResponse_STORED MemcacheSetResponse_SetStatusCode = 1
+ MemcacheSetResponse_NOT_STORED MemcacheSetResponse_SetStatusCode = 2
+ MemcacheSetResponse_ERROR MemcacheSetResponse_SetStatusCode = 3
+ MemcacheSetResponse_EXISTS MemcacheSetResponse_SetStatusCode = 4
+)
+
+var MemcacheSetResponse_SetStatusCode_name = map[int32]string{
+ 1: "STORED",
+ 2: "NOT_STORED",
+ 3: "ERROR",
+ 4: "EXISTS",
+}
+var MemcacheSetResponse_SetStatusCode_value = map[string]int32{
+ "STORED": 1,
+ "NOT_STORED": 2,
+ "ERROR": 3,
+ "EXISTS": 4,
+}
+
+func (x MemcacheSetResponse_SetStatusCode) Enum() *MemcacheSetResponse_SetStatusCode {
+ p := new(MemcacheSetResponse_SetStatusCode)
+ *p = x
+ return p
+}
+func (x MemcacheSetResponse_SetStatusCode) String() string {
+ return proto.EnumName(MemcacheSetResponse_SetStatusCode_name, int32(x))
+}
+func (x *MemcacheSetResponse_SetStatusCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheSetResponse_SetStatusCode_value, data, "MemcacheSetResponse_SetStatusCode")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheSetResponse_SetStatusCode(value)
+ return nil
+}
+
+type MemcacheDeleteResponse_DeleteStatusCode int32
+
+const (
+ MemcacheDeleteResponse_DELETED MemcacheDeleteResponse_DeleteStatusCode = 1
+ MemcacheDeleteResponse_NOT_FOUND MemcacheDeleteResponse_DeleteStatusCode = 2
+)
+
+var MemcacheDeleteResponse_DeleteStatusCode_name = map[int32]string{
+ 1: "DELETED",
+ 2: "NOT_FOUND",
+}
+var MemcacheDeleteResponse_DeleteStatusCode_value = map[string]int32{
+ "DELETED": 1,
+ "NOT_FOUND": 2,
+}
+
+func (x MemcacheDeleteResponse_DeleteStatusCode) Enum() *MemcacheDeleteResponse_DeleteStatusCode {
+ p := new(MemcacheDeleteResponse_DeleteStatusCode)
+ *p = x
+ return p
+}
+func (x MemcacheDeleteResponse_DeleteStatusCode) String() string {
+ return proto.EnumName(MemcacheDeleteResponse_DeleteStatusCode_name, int32(x))
+}
+func (x *MemcacheDeleteResponse_DeleteStatusCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheDeleteResponse_DeleteStatusCode_value, data, "MemcacheDeleteResponse_DeleteStatusCode")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheDeleteResponse_DeleteStatusCode(value)
+ return nil
+}
+
+type MemcacheIncrementRequest_Direction int32
+
+const (
+ MemcacheIncrementRequest_INCREMENT MemcacheIncrementRequest_Direction = 1
+ MemcacheIncrementRequest_DECREMENT MemcacheIncrementRequest_Direction = 2
+)
+
+var MemcacheIncrementRequest_Direction_name = map[int32]string{
+ 1: "INCREMENT",
+ 2: "DECREMENT",
+}
+var MemcacheIncrementRequest_Direction_value = map[string]int32{
+ "INCREMENT": 1,
+ "DECREMENT": 2,
+}
+
+func (x MemcacheIncrementRequest_Direction) Enum() *MemcacheIncrementRequest_Direction {
+ p := new(MemcacheIncrementRequest_Direction)
+ *p = x
+ return p
+}
+func (x MemcacheIncrementRequest_Direction) String() string {
+ return proto.EnumName(MemcacheIncrementRequest_Direction_name, int32(x))
+}
+func (x *MemcacheIncrementRequest_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheIncrementRequest_Direction_value, data, "MemcacheIncrementRequest_Direction")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheIncrementRequest_Direction(value)
+ return nil
+}
+
+type MemcacheIncrementResponse_IncrementStatusCode int32
+
+const (
+ MemcacheIncrementResponse_OK MemcacheIncrementResponse_IncrementStatusCode = 1
+ MemcacheIncrementResponse_NOT_CHANGED MemcacheIncrementResponse_IncrementStatusCode = 2
+ MemcacheIncrementResponse_ERROR MemcacheIncrementResponse_IncrementStatusCode = 3
+)
+
+var MemcacheIncrementResponse_IncrementStatusCode_name = map[int32]string{
+ 1: "OK",
+ 2: "NOT_CHANGED",
+ 3: "ERROR",
+}
+var MemcacheIncrementResponse_IncrementStatusCode_value = map[string]int32{
+ "OK": 1,
+ "NOT_CHANGED": 2,
+ "ERROR": 3,
+}
+
+func (x MemcacheIncrementResponse_IncrementStatusCode) Enum() *MemcacheIncrementResponse_IncrementStatusCode {
+ p := new(MemcacheIncrementResponse_IncrementStatusCode)
+ *p = x
+ return p
+}
+func (x MemcacheIncrementResponse_IncrementStatusCode) String() string {
+ return proto.EnumName(MemcacheIncrementResponse_IncrementStatusCode_name, int32(x))
+}
+func (x *MemcacheIncrementResponse_IncrementStatusCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheIncrementResponse_IncrementStatusCode_value, data, "MemcacheIncrementResponse_IncrementStatusCode")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheIncrementResponse_IncrementStatusCode(value)
+ return nil
+}
+
+type MemcacheServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheServiceError) Reset() { *m = MemcacheServiceError{} }
+func (m *MemcacheServiceError) String() string { return proto.CompactTextString(m) }
+func (*MemcacheServiceError) ProtoMessage() {}
+
+type AppOverride struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ NumMemcachegBackends *int32 `protobuf:"varint,2,opt,name=num_memcacheg_backends" json:"num_memcacheg_backends,omitempty"`
+ IgnoreShardlock *bool `protobuf:"varint,3,opt,name=ignore_shardlock" json:"ignore_shardlock,omitempty"`
+ MemcachePoolHint *string `protobuf:"bytes,4,opt,name=memcache_pool_hint" json:"memcache_pool_hint,omitempty"`
+ MemcacheShardingStrategy []byte `protobuf:"bytes,5,opt,name=memcache_sharding_strategy" json:"memcache_sharding_strategy,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AppOverride) Reset() { *m = AppOverride{} }
+func (m *AppOverride) String() string { return proto.CompactTextString(m) }
+func (*AppOverride) ProtoMessage() {}
+
+func (m *AppOverride) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *AppOverride) GetNumMemcachegBackends() int32 {
+ if m != nil && m.NumMemcachegBackends != nil {
+ return *m.NumMemcachegBackends
+ }
+ return 0
+}
+
+func (m *AppOverride) GetIgnoreShardlock() bool {
+ if m != nil && m.IgnoreShardlock != nil {
+ return *m.IgnoreShardlock
+ }
+ return false
+}
+
+func (m *AppOverride) GetMemcachePoolHint() string {
+ if m != nil && m.MemcachePoolHint != nil {
+ return *m.MemcachePoolHint
+ }
+ return ""
+}
+
+func (m *AppOverride) GetMemcacheShardingStrategy() []byte {
+ if m != nil {
+ return m.MemcacheShardingStrategy
+ }
+ return nil
+}
+
+type MemcacheGetRequest struct {
+ Key [][]byte `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ NameSpace *string `protobuf:"bytes,2,opt,name=name_space,def=" json:"name_space,omitempty"`
+ ForCas *bool `protobuf:"varint,4,opt,name=for_cas" json:"for_cas,omitempty"`
+ Override *AppOverride `protobuf:"bytes,5,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGetRequest) Reset() { *m = MemcacheGetRequest{} }
+func (m *MemcacheGetRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGetRequest) ProtoMessage() {}
+
+func (m *MemcacheGetRequest) GetKey() [][]byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheGetRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheGetRequest) GetForCas() bool {
+ if m != nil && m.ForCas != nil {
+ return *m.ForCas
+ }
+ return false
+}
+
+func (m *MemcacheGetRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheGetResponse struct {
+ Item []*MemcacheGetResponse_Item `protobuf:"group,1,rep" json:"item,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGetResponse) Reset() { *m = MemcacheGetResponse{} }
+func (m *MemcacheGetResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGetResponse) ProtoMessage() {}
+
+func (m *MemcacheGetResponse) GetItem() []*MemcacheGetResponse_Item {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+type MemcacheGetResponse_Item struct {
+ Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"`
+ Flags *uint32 `protobuf:"fixed32,4,opt,name=flags" json:"flags,omitempty"`
+ CasId *uint64 `protobuf:"fixed64,5,opt,name=cas_id" json:"cas_id,omitempty"`
+ ExpiresInSeconds *int32 `protobuf:"varint,6,opt,name=expires_in_seconds" json:"expires_in_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGetResponse_Item) Reset() { *m = MemcacheGetResponse_Item{} }
+func (m *MemcacheGetResponse_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGetResponse_Item) ProtoMessage() {}
+
+func (m *MemcacheGetResponse_Item) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheGetResponse_Item) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *MemcacheGetResponse_Item) GetFlags() uint32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return 0
+}
+
+func (m *MemcacheGetResponse_Item) GetCasId() uint64 {
+ if m != nil && m.CasId != nil {
+ return *m.CasId
+ }
+ return 0
+}
+
+func (m *MemcacheGetResponse_Item) GetExpiresInSeconds() int32 {
+ if m != nil && m.ExpiresInSeconds != nil {
+ return *m.ExpiresInSeconds
+ }
+ return 0
+}
+
+type MemcacheSetRequest struct {
+ Item []*MemcacheSetRequest_Item `protobuf:"group,1,rep" json:"item,omitempty"`
+ NameSpace *string `protobuf:"bytes,7,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Override *AppOverride `protobuf:"bytes,10,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheSetRequest) Reset() { *m = MemcacheSetRequest{} }
+func (m *MemcacheSetRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheSetRequest) ProtoMessage() {}
+
+func (m *MemcacheSetRequest) GetItem() []*MemcacheSetRequest_Item {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+func (m *MemcacheSetRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheSetRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheSetRequest_Item struct {
+ Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"`
+ Flags *uint32 `protobuf:"fixed32,4,opt,name=flags" json:"flags,omitempty"`
+ SetPolicy *MemcacheSetRequest_SetPolicy `protobuf:"varint,5,opt,name=set_policy,enum=appengine.MemcacheSetRequest_SetPolicy,def=1" json:"set_policy,omitempty"`
+ ExpirationTime *uint32 `protobuf:"fixed32,6,opt,name=expiration_time,def=0" json:"expiration_time,omitempty"`
+ CasId *uint64 `protobuf:"fixed64,8,opt,name=cas_id" json:"cas_id,omitempty"`
+ ForCas *bool `protobuf:"varint,9,opt,name=for_cas" json:"for_cas,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheSetRequest_Item) Reset() { *m = MemcacheSetRequest_Item{} }
+func (m *MemcacheSetRequest_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheSetRequest_Item) ProtoMessage() {}
+
+const Default_MemcacheSetRequest_Item_SetPolicy MemcacheSetRequest_SetPolicy = MemcacheSetRequest_SET
+const Default_MemcacheSetRequest_Item_ExpirationTime uint32 = 0
+
+func (m *MemcacheSetRequest_Item) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheSetRequest_Item) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *MemcacheSetRequest_Item) GetFlags() uint32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return 0
+}
+
+func (m *MemcacheSetRequest_Item) GetSetPolicy() MemcacheSetRequest_SetPolicy {
+ if m != nil && m.SetPolicy != nil {
+ return *m.SetPolicy
+ }
+ return Default_MemcacheSetRequest_Item_SetPolicy
+}
+
+func (m *MemcacheSetRequest_Item) GetExpirationTime() uint32 {
+ if m != nil && m.ExpirationTime != nil {
+ return *m.ExpirationTime
+ }
+ return Default_MemcacheSetRequest_Item_ExpirationTime
+}
+
+func (m *MemcacheSetRequest_Item) GetCasId() uint64 {
+ if m != nil && m.CasId != nil {
+ return *m.CasId
+ }
+ return 0
+}
+
+func (m *MemcacheSetRequest_Item) GetForCas() bool {
+ if m != nil && m.ForCas != nil {
+ return *m.ForCas
+ }
+ return false
+}
+
+type MemcacheSetResponse struct {
+ SetStatus []MemcacheSetResponse_SetStatusCode `protobuf:"varint,1,rep,name=set_status,enum=appengine.MemcacheSetResponse_SetStatusCode" json:"set_status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheSetResponse) Reset() { *m = MemcacheSetResponse{} }
+func (m *MemcacheSetResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheSetResponse) ProtoMessage() {}
+
+func (m *MemcacheSetResponse) GetSetStatus() []MemcacheSetResponse_SetStatusCode {
+ if m != nil {
+ return m.SetStatus
+ }
+ return nil
+}
+
+type MemcacheDeleteRequest struct {
+ Item []*MemcacheDeleteRequest_Item `protobuf:"group,1,rep" json:"item,omitempty"`
+ NameSpace *string `protobuf:"bytes,4,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Override *AppOverride `protobuf:"bytes,5,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheDeleteRequest) Reset() { *m = MemcacheDeleteRequest{} }
+func (m *MemcacheDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheDeleteRequest) ProtoMessage() {}
+
+func (m *MemcacheDeleteRequest) GetItem() []*MemcacheDeleteRequest_Item {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+func (m *MemcacheDeleteRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheDeleteRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheDeleteRequest_Item struct {
+ Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
+ DeleteTime *uint32 `protobuf:"fixed32,3,opt,name=delete_time,def=0" json:"delete_time,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheDeleteRequest_Item) Reset() { *m = MemcacheDeleteRequest_Item{} }
+func (m *MemcacheDeleteRequest_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheDeleteRequest_Item) ProtoMessage() {}
+
+const Default_MemcacheDeleteRequest_Item_DeleteTime uint32 = 0
+
+func (m *MemcacheDeleteRequest_Item) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheDeleteRequest_Item) GetDeleteTime() uint32 {
+ if m != nil && m.DeleteTime != nil {
+ return *m.DeleteTime
+ }
+ return Default_MemcacheDeleteRequest_Item_DeleteTime
+}
+
+type MemcacheDeleteResponse struct {
+ DeleteStatus []MemcacheDeleteResponse_DeleteStatusCode `protobuf:"varint,1,rep,name=delete_status,enum=appengine.MemcacheDeleteResponse_DeleteStatusCode" json:"delete_status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheDeleteResponse) Reset() { *m = MemcacheDeleteResponse{} }
+func (m *MemcacheDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheDeleteResponse) ProtoMessage() {}
+
+func (m *MemcacheDeleteResponse) GetDeleteStatus() []MemcacheDeleteResponse_DeleteStatusCode {
+ if m != nil {
+ return m.DeleteStatus
+ }
+ return nil
+}
+
+type MemcacheIncrementRequest struct {
+ Key []byte `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+ NameSpace *string `protobuf:"bytes,4,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Delta *uint64 `protobuf:"varint,2,opt,name=delta,def=1" json:"delta,omitempty"`
+ Direction *MemcacheIncrementRequest_Direction `protobuf:"varint,3,opt,name=direction,enum=appengine.MemcacheIncrementRequest_Direction,def=1" json:"direction,omitempty"`
+ InitialValue *uint64 `protobuf:"varint,5,opt,name=initial_value" json:"initial_value,omitempty"`
+ InitialFlags *uint32 `protobuf:"fixed32,6,opt,name=initial_flags" json:"initial_flags,omitempty"`
+ Override *AppOverride `protobuf:"bytes,7,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheIncrementRequest) Reset() { *m = MemcacheIncrementRequest{} }
+func (m *MemcacheIncrementRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheIncrementRequest) ProtoMessage() {}
+
+const Default_MemcacheIncrementRequest_Delta uint64 = 1
+const Default_MemcacheIncrementRequest_Direction MemcacheIncrementRequest_Direction = MemcacheIncrementRequest_INCREMENT
+
+func (m *MemcacheIncrementRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheIncrementRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheIncrementRequest) GetDelta() uint64 {
+ if m != nil && m.Delta != nil {
+ return *m.Delta
+ }
+ return Default_MemcacheIncrementRequest_Delta
+}
+
+func (m *MemcacheIncrementRequest) GetDirection() MemcacheIncrementRequest_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_MemcacheIncrementRequest_Direction
+}
+
+func (m *MemcacheIncrementRequest) GetInitialValue() uint64 {
+ if m != nil && m.InitialValue != nil {
+ return *m.InitialValue
+ }
+ return 0
+}
+
+func (m *MemcacheIncrementRequest) GetInitialFlags() uint32 {
+ if m != nil && m.InitialFlags != nil {
+ return *m.InitialFlags
+ }
+ return 0
+}
+
+func (m *MemcacheIncrementRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheIncrementResponse struct {
+ NewValue *uint64 `protobuf:"varint,1,opt,name=new_value" json:"new_value,omitempty"`
+ IncrementStatus *MemcacheIncrementResponse_IncrementStatusCode `protobuf:"varint,2,opt,name=increment_status,enum=appengine.MemcacheIncrementResponse_IncrementStatusCode" json:"increment_status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheIncrementResponse) Reset() { *m = MemcacheIncrementResponse{} }
+func (m *MemcacheIncrementResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheIncrementResponse) ProtoMessage() {}
+
+func (m *MemcacheIncrementResponse) GetNewValue() uint64 {
+ if m != nil && m.NewValue != nil {
+ return *m.NewValue
+ }
+ return 0
+}
+
+func (m *MemcacheIncrementResponse) GetIncrementStatus() MemcacheIncrementResponse_IncrementStatusCode {
+ if m != nil && m.IncrementStatus != nil {
+ return *m.IncrementStatus
+ }
+ return MemcacheIncrementResponse_OK
+}
+
+type MemcacheBatchIncrementRequest struct {
+ NameSpace *string `protobuf:"bytes,1,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Item []*MemcacheIncrementRequest `protobuf:"bytes,2,rep,name=item" json:"item,omitempty"`
+ Override *AppOverride `protobuf:"bytes,3,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheBatchIncrementRequest) Reset() { *m = MemcacheBatchIncrementRequest{} }
+func (m *MemcacheBatchIncrementRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheBatchIncrementRequest) ProtoMessage() {}
+
+func (m *MemcacheBatchIncrementRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheBatchIncrementRequest) GetItem() []*MemcacheIncrementRequest {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+func (m *MemcacheBatchIncrementRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheBatchIncrementResponse struct {
+ Item []*MemcacheIncrementResponse `protobuf:"bytes,1,rep,name=item" json:"item,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheBatchIncrementResponse) Reset() { *m = MemcacheBatchIncrementResponse{} }
+func (m *MemcacheBatchIncrementResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheBatchIncrementResponse) ProtoMessage() {}
+
+func (m *MemcacheBatchIncrementResponse) GetItem() []*MemcacheIncrementResponse {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+type MemcacheFlushRequest struct {
+ Override *AppOverride `protobuf:"bytes,1,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheFlushRequest) Reset() { *m = MemcacheFlushRequest{} }
+func (m *MemcacheFlushRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheFlushRequest) ProtoMessage() {}
+
+func (m *MemcacheFlushRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheFlushResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheFlushResponse) Reset() { *m = MemcacheFlushResponse{} }
+func (m *MemcacheFlushResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheFlushResponse) ProtoMessage() {}
+
+type MemcacheStatsRequest struct {
+ Override *AppOverride `protobuf:"bytes,1,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheStatsRequest) Reset() { *m = MemcacheStatsRequest{} }
+func (m *MemcacheStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheStatsRequest) ProtoMessage() {}
+
+func (m *MemcacheStatsRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MergedNamespaceStats struct {
+ Hits *uint64 `protobuf:"varint,1,req,name=hits" json:"hits,omitempty"`
+ Misses *uint64 `protobuf:"varint,2,req,name=misses" json:"misses,omitempty"`
+ ByteHits *uint64 `protobuf:"varint,3,req,name=byte_hits" json:"byte_hits,omitempty"`
+ Items *uint64 `protobuf:"varint,4,req,name=items" json:"items,omitempty"`
+ Bytes *uint64 `protobuf:"varint,5,req,name=bytes" json:"bytes,omitempty"`
+ OldestItemAge *uint32 `protobuf:"fixed32,6,req,name=oldest_item_age" json:"oldest_item_age,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MergedNamespaceStats) Reset() { *m = MergedNamespaceStats{} }
+func (m *MergedNamespaceStats) String() string { return proto.CompactTextString(m) }
+func (*MergedNamespaceStats) ProtoMessage() {}
+
+func (m *MergedNamespaceStats) GetHits() uint64 {
+ if m != nil && m.Hits != nil {
+ return *m.Hits
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetMisses() uint64 {
+ if m != nil && m.Misses != nil {
+ return *m.Misses
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetByteHits() uint64 {
+ if m != nil && m.ByteHits != nil {
+ return *m.ByteHits
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetItems() uint64 {
+ if m != nil && m.Items != nil {
+ return *m.Items
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetBytes() uint64 {
+ if m != nil && m.Bytes != nil {
+ return *m.Bytes
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetOldestItemAge() uint32 {
+ if m != nil && m.OldestItemAge != nil {
+ return *m.OldestItemAge
+ }
+ return 0
+}
+
+type MemcacheStatsResponse struct {
+ Stats *MergedNamespaceStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheStatsResponse) Reset() { *m = MemcacheStatsResponse{} }
+func (m *MemcacheStatsResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheStatsResponse) ProtoMessage() {}
+
+func (m *MemcacheStatsResponse) GetStats() *MergedNamespaceStats {
+ if m != nil {
+ return m.Stats
+ }
+ return nil
+}
+
+type MemcacheGrabTailRequest struct {
+ ItemCount *int32 `protobuf:"varint,1,req,name=item_count" json:"item_count,omitempty"`
+ NameSpace *string `protobuf:"bytes,2,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Override *AppOverride `protobuf:"bytes,3,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGrabTailRequest) Reset() { *m = MemcacheGrabTailRequest{} }
+func (m *MemcacheGrabTailRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGrabTailRequest) ProtoMessage() {}
+
+func (m *MemcacheGrabTailRequest) GetItemCount() int32 {
+ if m != nil && m.ItemCount != nil {
+ return *m.ItemCount
+ }
+ return 0
+}
+
+func (m *MemcacheGrabTailRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheGrabTailRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheGrabTailResponse struct {
+ Item []*MemcacheGrabTailResponse_Item `protobuf:"group,1,rep" json:"item,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGrabTailResponse) Reset() { *m = MemcacheGrabTailResponse{} }
+func (m *MemcacheGrabTailResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGrabTailResponse) ProtoMessage() {}
+
+func (m *MemcacheGrabTailResponse) GetItem() []*MemcacheGrabTailResponse_Item {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+type MemcacheGrabTailResponse_Item struct {
+ Value []byte `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ Flags *uint32 `protobuf:"fixed32,3,opt,name=flags" json:"flags,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGrabTailResponse_Item) Reset() { *m = MemcacheGrabTailResponse_Item{} }
+func (m *MemcacheGrabTailResponse_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGrabTailResponse_Item) ProtoMessage() {}
+
+func (m *MemcacheGrabTailResponse_Item) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *MemcacheGrabTailResponse_Item) GetFlags() uint32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return 0
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto
new file mode 100644
index 000000000..5f0edcdc7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto
@@ -0,0 +1,165 @@
+syntax = "proto2";
+option go_package = "memcache";
+
+package appengine;
+
+message MemcacheServiceError {
+ enum ErrorCode {
+ OK = 0;
+ UNSPECIFIED_ERROR = 1;
+ NAMESPACE_NOT_SET = 2;
+ PERMISSION_DENIED = 3;
+ INVALID_VALUE = 6;
+ }
+}
+
+message AppOverride {
+ required string app_id = 1;
+
+ optional int32 num_memcacheg_backends = 2 [deprecated=true];
+ optional bool ignore_shardlock = 3 [deprecated=true];
+ optional string memcache_pool_hint = 4 [deprecated=true];
+ optional bytes memcache_sharding_strategy = 5 [deprecated=true];
+}
+
+message MemcacheGetRequest {
+ repeated bytes key = 1;
+ optional string name_space = 2 [default = ""];
+ optional bool for_cas = 4;
+ optional AppOverride override = 5;
+}
+
+message MemcacheGetResponse {
+ repeated group Item = 1 {
+ required bytes key = 2;
+ required bytes value = 3;
+ optional fixed32 flags = 4;
+ optional fixed64 cas_id = 5;
+ optional int32 expires_in_seconds = 6;
+ }
+}
+
+message MemcacheSetRequest {
+ enum SetPolicy {
+ SET = 1;
+ ADD = 2;
+ REPLACE = 3;
+ CAS = 4;
+ }
+ repeated group Item = 1 {
+ required bytes key = 2;
+ required bytes value = 3;
+
+ optional fixed32 flags = 4;
+ optional SetPolicy set_policy = 5 [default = SET];
+ optional fixed32 expiration_time = 6 [default = 0];
+
+ optional fixed64 cas_id = 8;
+ optional bool for_cas = 9;
+ }
+ optional string name_space = 7 [default = ""];
+ optional AppOverride override = 10;
+}
+
+message MemcacheSetResponse {
+ enum SetStatusCode {
+ STORED = 1;
+ NOT_STORED = 2;
+ ERROR = 3;
+ EXISTS = 4;
+ }
+ repeated SetStatusCode set_status = 1;
+}
+
+message MemcacheDeleteRequest {
+ repeated group Item = 1 {
+ required bytes key = 2;
+ optional fixed32 delete_time = 3 [default = 0];
+ }
+ optional string name_space = 4 [default = ""];
+ optional AppOverride override = 5;
+}
+
+message MemcacheDeleteResponse {
+ enum DeleteStatusCode {
+ DELETED = 1;
+ NOT_FOUND = 2;
+ }
+ repeated DeleteStatusCode delete_status = 1;
+}
+
+message MemcacheIncrementRequest {
+ enum Direction {
+ INCREMENT = 1;
+ DECREMENT = 2;
+ }
+ required bytes key = 1;
+ optional string name_space = 4 [default = ""];
+
+ optional uint64 delta = 2 [default = 1];
+ optional Direction direction = 3 [default = INCREMENT];
+
+ optional uint64 initial_value = 5;
+ optional fixed32 initial_flags = 6;
+ optional AppOverride override = 7;
+}
+
+message MemcacheIncrementResponse {
+ enum IncrementStatusCode {
+ OK = 1;
+ NOT_CHANGED = 2;
+ ERROR = 3;
+ }
+
+ optional uint64 new_value = 1;
+ optional IncrementStatusCode increment_status = 2;
+}
+
+message MemcacheBatchIncrementRequest {
+ optional string name_space = 1 [default = ""];
+ repeated MemcacheIncrementRequest item = 2;
+ optional AppOverride override = 3;
+}
+
+message MemcacheBatchIncrementResponse {
+ repeated MemcacheIncrementResponse item = 1;
+}
+
+message MemcacheFlushRequest {
+ optional AppOverride override = 1;
+}
+
+message MemcacheFlushResponse {
+}
+
+message MemcacheStatsRequest {
+ optional AppOverride override = 1;
+}
+
+message MergedNamespaceStats {
+ required uint64 hits = 1;
+ required uint64 misses = 2;
+ required uint64 byte_hits = 3;
+
+ required uint64 items = 4;
+ required uint64 bytes = 5;
+
+ required fixed32 oldest_item_age = 6;
+}
+
+message MemcacheStatsResponse {
+ optional MergedNamespaceStats stats = 1;
+}
+
+message MemcacheGrabTailRequest {
+ required int32 item_count = 1;
+ optional string name_space = 2 [default = ""];
+ optional AppOverride override = 3;
+}
+
+message MemcacheGrabTailResponse {
+ repeated group Item = 1 {
+ required bytes value = 2;
+ optional fixed32 flags = 3;
+ }
+}
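
Reviewer note, not part of the vendored file: as a quick illustration of how these proto messages map onto the generated Go API in memcache_service.pb.go above, here is a hedged sketch of building a set request. Key, value, namespace, and expiration are example values, and the package is internal to appengine, so this is only a sketch of the shape of the API, not code an external caller could import.

// Sketch only: constructing a MemcacheSetRequest with the generated types.
// proto.String / proto.Uint32 are pointer helpers from github.com/golang/protobuf/proto.
item := &memcache.MemcacheSetRequest_Item{
	Key:            []byte("greeting"),          // example key
	Value:          []byte("hello"),              // example value
	SetPolicy:      memcache.MemcacheSetRequest_ADD.Enum(),
	ExpirationTime: proto.Uint32(3600),           // example TTL in seconds
}
req := &memcache.MemcacheSetRequest{
	Item:      []*memcache.MemcacheSetRequest_Item{item},
	NameSpace: proto.String(""),
}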
diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go
new file mode 100644
index 000000000..b68fb7536
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/metadata.go
@@ -0,0 +1,61 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file has code for accessing metadata.
+//
+// References:
+// https://cloud.google.com/compute/docs/metadata
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/url"
+)
+
+const (
+ metadataHost = "metadata"
+ metadataPath = "/computeMetadata/v1/"
+)
+
+var (
+ metadataRequestHeaders = http.Header{
+ "X-Google-Metadata-Request": []string{"True"},
+ }
+)
+
+// TODO(dsymonds): Do we need to support default values, like Python?
+func mustGetMetadata(key string) []byte {
+ b, err := getMetadata(key)
+ if err != nil {
+ log.Fatalf("Metadata fetch failed: %v", err)
+ }
+ return b
+}
+
+func getMetadata(key string) ([]byte, error) {
+ // TODO(dsymonds): May need to use url.Parse to support keys with query args.
+ req := &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Scheme: "http",
+ Host: metadataHost,
+ Path: metadataPath + key,
+ },
+ Header: metadataRequestHeaders,
+ Host: metadataHost,
+ }
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
+ }
+ return ioutil.ReadAll(resp.Body)
+}
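
Reviewer note, not part of the vendored file: metadata.go above issues a plain HTTP GET against the compute metadata server. A minimal usage sketch follows; it assumes code living in the same internal package (getMetadata is unexported), and the "instance/hostname" key path is only an illustrative example.

// Sketch only: fetching one metadata value with the helper above.
// Resolves to GET http://metadata/computeMetadata/v1/instance/hostname.
func exampleHostname() (string, error) {
	b, err := getMetadata("instance/hostname")
	if err != nil {
		return "", err
	}
	return string(b), nil
}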
diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
new file mode 100644
index 000000000..341e7d1fc
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
@@ -0,0 +1,373 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/modules/modules_service.proto
+// DO NOT EDIT!
+
+/*
+Package modules is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/modules/modules_service.proto
+
+It has these top-level messages:
+ ModulesServiceError
+ GetModulesRequest
+ GetModulesResponse
+ GetVersionsRequest
+ GetVersionsResponse
+ GetDefaultVersionRequest
+ GetDefaultVersionResponse
+ GetNumInstancesRequest
+ GetNumInstancesResponse
+ SetNumInstancesRequest
+ SetNumInstancesResponse
+ StartModuleRequest
+ StartModuleResponse
+ StopModuleRequest
+ StopModuleResponse
+ GetHostnameRequest
+ GetHostnameResponse
+*/
+package modules
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type ModulesServiceError_ErrorCode int32
+
+const (
+ ModulesServiceError_OK ModulesServiceError_ErrorCode = 0
+ ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1
+ ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2
+ ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3
+ ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4
+ ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5
+)
+
+var ModulesServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_MODULE",
+ 2: "INVALID_VERSION",
+ 3: "INVALID_INSTANCES",
+ 4: "TRANSIENT_ERROR",
+ 5: "UNEXPECTED_STATE",
+}
+var ModulesServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_MODULE": 1,
+ "INVALID_VERSION": 2,
+ "INVALID_INSTANCES": 3,
+ "TRANSIENT_ERROR": 4,
+ "UNEXPECTED_STATE": 5,
+}
+
+func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode {
+ p := new(ModulesServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x ModulesServiceError_ErrorCode) String() string {
+ return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x))
+}
+func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = ModulesServiceError_ErrorCode(value)
+ return nil
+}
+
+type ModulesServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} }
+func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) }
+func (*ModulesServiceError) ProtoMessage() {}
+
+type GetModulesRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} }
+func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) }
+func (*GetModulesRequest) ProtoMessage() {}
+
+type GetModulesResponse struct {
+ Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} }
+func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) }
+func (*GetModulesResponse) ProtoMessage() {}
+
+func (m *GetModulesResponse) GetModule() []string {
+ if m != nil {
+ return m.Module
+ }
+ return nil
+}
+
+type GetVersionsRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} }
+func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) }
+func (*GetVersionsRequest) ProtoMessage() {}
+
+func (m *GetVersionsRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+type GetVersionsResponse struct {
+ Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} }
+func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) }
+func (*GetVersionsResponse) ProtoMessage() {}
+
+func (m *GetVersionsResponse) GetVersion() []string {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type GetDefaultVersionRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} }
+func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultVersionRequest) ProtoMessage() {}
+
+func (m *GetDefaultVersionRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+type GetDefaultVersionResponse struct {
+ Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} }
+func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultVersionResponse) ProtoMessage() {}
+
+func (m *GetDefaultVersionResponse) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type GetNumInstancesRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} }
+func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
+func (*GetNumInstancesRequest) ProtoMessage() {}
+
+func (m *GetNumInstancesRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *GetNumInstancesRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type GetNumInstancesResponse struct {
+ Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} }
+func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
+func (*GetNumInstancesResponse) ProtoMessage() {}
+
+func (m *GetNumInstancesResponse) GetInstances() int64 {
+ if m != nil && m.Instances != nil {
+ return *m.Instances
+ }
+ return 0
+}
+
+type SetNumInstancesRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} }
+func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
+func (*SetNumInstancesRequest) ProtoMessage() {}
+
+func (m *SetNumInstancesRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *SetNumInstancesRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+func (m *SetNumInstancesRequest) GetInstances() int64 {
+ if m != nil && m.Instances != nil {
+ return *m.Instances
+ }
+ return 0
+}
+
+type SetNumInstancesResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} }
+func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
+func (*SetNumInstancesResponse) ProtoMessage() {}
+
+type StartModuleRequest struct {
+ Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} }
+func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) }
+func (*StartModuleRequest) ProtoMessage() {}
+
+func (m *StartModuleRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *StartModuleRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type StartModuleResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} }
+func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) }
+func (*StartModuleResponse) ProtoMessage() {}
+
+type StopModuleRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} }
+func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) }
+func (*StopModuleRequest) ProtoMessage() {}
+
+func (m *StopModuleRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *StopModuleRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type StopModuleResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} }
+func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) }
+func (*StopModuleResponse) ProtoMessage() {}
+
+type GetHostnameRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} }
+func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetHostnameRequest) ProtoMessage() {}
+
+func (m *GetHostnameRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *GetHostnameRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+func (m *GetHostnameRequest) GetInstance() string {
+ if m != nil && m.Instance != nil {
+ return *m.Instance
+ }
+ return ""
+}
+
+type GetHostnameResponse struct {
+ Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} }
+func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetHostnameResponse) ProtoMessage() {}
+
+func (m *GetHostnameResponse) GetHostname() string {
+ if m != nil && m.Hostname != nil {
+ return *m.Hostname
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
new file mode 100644
index 000000000..d29f0065a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
@@ -0,0 +1,80 @@
+syntax = "proto2";
+option go_package = "modules";
+
+package appengine;
+
+message ModulesServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_MODULE = 1;
+ INVALID_VERSION = 2;
+ INVALID_INSTANCES = 3;
+ TRANSIENT_ERROR = 4;
+ UNEXPECTED_STATE = 5;
+ }
+}
+
+message GetModulesRequest {
+}
+
+message GetModulesResponse {
+ repeated string module = 1;
+}
+
+message GetVersionsRequest {
+ optional string module = 1;
+}
+
+message GetVersionsResponse {
+ repeated string version = 1;
+}
+
+message GetDefaultVersionRequest {
+ optional string module = 1;
+}
+
+message GetDefaultVersionResponse {
+ required string version = 1;
+}
+
+message GetNumInstancesRequest {
+ optional string module = 1;
+ optional string version = 2;
+}
+
+message GetNumInstancesResponse {
+ required int64 instances = 1;
+}
+
+message SetNumInstancesRequest {
+ optional string module = 1;
+ optional string version = 2;
+ required int64 instances = 3;
+}
+
+message SetNumInstancesResponse {}
+
+message StartModuleRequest {
+ required string module = 1;
+ required string version = 2;
+}
+
+message StartModuleResponse {}
+
+message StopModuleRequest {
+ optional string module = 1;
+ optional string version = 2;
+}
+
+message StopModuleResponse {}
+
+message GetHostnameRequest {
+ optional string module = 1;
+ optional string version = 2;
+ optional string instance = 3;
+}
+
+message GetHostnameResponse {
+ required string hostname = 1;
+}
+
diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go
new file mode 100644
index 000000000..12ddfbf59
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/net.go
@@ -0,0 +1,63 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file implements a network dialer that limits the number of concurrent connections.
+// It is only used for API calls.
+
+import (
+ "log"
+ "net"
+ "runtime"
+ "sync"
+ "time"
+)
+
+var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.
+
+func limitRelease() {
+ // non-blocking
+ select {
+ case <-limitSem:
+ default:
+ // This should not normally happen.
+ log.Print("appengine: unbalanced limitSem release!")
+ }
+}
+
+func limitDial(network, addr string) (net.Conn, error) {
+ limitSem <- 1
+
+ // Dial with a timeout in case the API host is MIA.
+ // The connection should normally be very fast.
+ conn, err := net.DialTimeout(network, addr, 500*time.Millisecond)
+ if err != nil {
+ limitRelease()
+ return nil, err
+ }
+ lc := &limitConn{Conn: conn}
+ runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
+ return lc, nil
+}
+
+type limitConn struct {
+ mu sync.Mutex // only for closing the net.Conn
+ net.Conn
+}
+
+func (lc *limitConn) Close() error {
+ lc.mu.Lock()
+ defer lc.mu.Unlock()
+
+ if lc.Conn == nil {
+ // Silently ignore double close.
+ return nil
+ }
+ limitRelease()
+ err := lc.Conn.Close()
+ lc.Conn = nil
+ runtime.SetFinalizer(lc, nil)
+ return err
+}
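
Reviewer note, not part of the vendored file: limitDial above matches the signature of net/http's Transport.Dial field (Go 1.5 era), so the concurrency cap can be applied by wiring the dialer into a transport. The wiring below is an assumption made for illustration, not necessarily how the appengine package itself consumes limitDial.

// Sketch only: capping concurrent API connections via the limited dialer.
// Assumes this lives in the same internal package, since limitDial is unexported.
var apiHTTPClient = &http.Client{
	Transport: &http.Transport{
		Dial: limitDial, // at most cap(limitSem) concurrent connections
	},
}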
diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh
new file mode 100644
index 000000000..f98e761c9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/regen.sh
@@ -0,0 +1,38 @@
+#!/bin/bash -e
+#
+# This script rebuilds the generated code for the protocol buffers.
+# To run this you will need protoc and goprotobuf installed;
+# see https://github.com/golang/protobuf for instructions.
+
+PKG=google.golang.org/appengine
+
+function die() {
+ echo 1>&2 $*
+ exit 1
+}
+
+# Sanity check that the right tools are accessible.
+for tool in go protoc protoc-gen-go; do
+ q=$(which $tool) || die "didn't find $tool"
+ echo 1>&2 "$tool: $q"
+done
+
+echo -n 1>&2 "finding package dir... "
+pkgdir=$(go list -f '{{.Dir}}' $PKG)
+echo 1>&2 $pkgdir
+base=$(echo $pkgdir | sed "s,/$PKG\$,,")
+echo 1>&2 "base: $base"
+cd $base
+for f in $(find $PKG/internal -name '*.proto'); do
+ echo 1>&2 "* $f"
+ protoc --go_out=. $f
+done
+
+for f in $(find $PKG/internal -name '*.pb.go'); do
+ # Remove proto.RegisterEnum calls.
+ # These cause duplicate registration panics when these packages
+ # are used on classic App Engine. proto.RegisterEnum only affects
+ # parsing the text format; we don't care about that.
+ # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
+ sed -i '/proto.RegisterEnum/d' $f
+done
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
new file mode 100644
index 000000000..aceca21cd
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
@@ -0,0 +1,229 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
+// DO NOT EDIT!
+
+/*
+Package remote_api is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/remote_api/remote_api.proto
+
+It has these top-level messages:
+ Request
+ ApplicationError
+ RpcError
+ Response
+*/
+package remote_api
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type RpcError_ErrorCode int32
+
+const (
+ RpcError_UNKNOWN RpcError_ErrorCode = 0
+ RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1
+ RpcError_PARSE_ERROR RpcError_ErrorCode = 2
+ RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3
+ RpcError_OVER_QUOTA RpcError_ErrorCode = 4
+ RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5
+ RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6
+ RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7
+ RpcError_BAD_REQUEST RpcError_ErrorCode = 8
+ RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9
+ RpcError_CANCELLED RpcError_ErrorCode = 10
+ RpcError_REPLAY_ERROR RpcError_ErrorCode = 11
+ RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12
+)
+
+var RpcError_ErrorCode_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "CALL_NOT_FOUND",
+ 2: "PARSE_ERROR",
+ 3: "SECURITY_VIOLATION",
+ 4: "OVER_QUOTA",
+ 5: "REQUEST_TOO_LARGE",
+ 6: "CAPABILITY_DISABLED",
+ 7: "FEATURE_DISABLED",
+ 8: "BAD_REQUEST",
+ 9: "RESPONSE_TOO_LARGE",
+ 10: "CANCELLED",
+ 11: "REPLAY_ERROR",
+ 12: "DEADLINE_EXCEEDED",
+}
+var RpcError_ErrorCode_value = map[string]int32{
+ "UNKNOWN": 0,
+ "CALL_NOT_FOUND": 1,
+ "PARSE_ERROR": 2,
+ "SECURITY_VIOLATION": 3,
+ "OVER_QUOTA": 4,
+ "REQUEST_TOO_LARGE": 5,
+ "CAPABILITY_DISABLED": 6,
+ "FEATURE_DISABLED": 7,
+ "BAD_REQUEST": 8,
+ "RESPONSE_TOO_LARGE": 9,
+ "CANCELLED": 10,
+ "REPLAY_ERROR": 11,
+ "DEADLINE_EXCEEDED": 12,
+}
+
+func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {
+ p := new(RpcError_ErrorCode)
+ *p = x
+ return p
+}
+func (x RpcError_ErrorCode) String() string {
+ return proto.EnumName(RpcError_ErrorCode_name, int32(x))
+}
+func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = RpcError_ErrorCode(value)
+ return nil
+}
+
+type Request struct {
+ ServiceName *string `protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"`
+ Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
+ Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
+ RequestId *string `protobuf:"bytes,5,opt,name=request_id" json:"request_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Request) Reset() { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage() {}
+
+func (m *Request) GetServiceName() string {
+ if m != nil && m.ServiceName != nil {
+ return *m.ServiceName
+ }
+ return ""
+}
+
+func (m *Request) GetMethod() string {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return ""
+}
+
+func (m *Request) GetRequest() []byte {
+ if m != nil {
+ return m.Request
+ }
+ return nil
+}
+
+func (m *Request) GetRequestId() string {
+ if m != nil && m.RequestId != nil {
+ return *m.RequestId
+ }
+ return ""
+}
+
+type ApplicationError struct {
+ Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+ Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ApplicationError) Reset() { *m = ApplicationError{} }
+func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
+func (*ApplicationError) ProtoMessage() {}
+
+func (m *ApplicationError) GetCode() int32 {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return 0
+}
+
+func (m *ApplicationError) GetDetail() string {
+ if m != nil && m.Detail != nil {
+ return *m.Detail
+ }
+ return ""
+}
+
+type RpcError struct {
+ Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+ Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RpcError) Reset() { *m = RpcError{} }
+func (m *RpcError) String() string { return proto.CompactTextString(m) }
+func (*RpcError) ProtoMessage() {}
+
+func (m *RpcError) GetCode() int32 {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return 0
+}
+
+func (m *RpcError) GetDetail() string {
+ if m != nil && m.Detail != nil {
+ return *m.Detail
+ }
+ return ""
+}
+
+type Response struct {
+ Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
+ Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
+ ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error" json:"application_error,omitempty"`
+ JavaException []byte `protobuf:"bytes,4,opt,name=java_exception" json:"java_exception,omitempty"`
+ RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error" json:"rpc_error,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Response) Reset() { *m = Response{} }
+func (m *Response) String() string { return proto.CompactTextString(m) }
+func (*Response) ProtoMessage() {}
+
+func (m *Response) GetResponse() []byte {
+ if m != nil {
+ return m.Response
+ }
+ return nil
+}
+
+func (m *Response) GetException() []byte {
+ if m != nil {
+ return m.Exception
+ }
+ return nil
+}
+
+func (m *Response) GetApplicationError() *ApplicationError {
+ if m != nil {
+ return m.ApplicationError
+ }
+ return nil
+}
+
+func (m *Response) GetJavaException() []byte {
+ if m != nil {
+ return m.JavaException
+ }
+ return nil
+}
+
+func (m *Response) GetRpcError() *RpcError {
+ if m != nil {
+ return m.RpcError
+ }
+ return nil
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
new file mode 100644
index 000000000..f21763a4e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
@@ -0,0 +1,44 @@
+syntax = "proto2";
+option go_package = "remote_api";
+
+package remote_api;
+
+message Request {
+ required string service_name = 2;
+ required string method = 3;
+ required bytes request = 4;
+ optional string request_id = 5;
+}
+
+message ApplicationError {
+ required int32 code = 1;
+ required string detail = 2;
+}
+
+message RpcError {
+ enum ErrorCode {
+ UNKNOWN = 0;
+ CALL_NOT_FOUND = 1;
+ PARSE_ERROR = 2;
+ SECURITY_VIOLATION = 3;
+ OVER_QUOTA = 4;
+ REQUEST_TOO_LARGE = 5;
+ CAPABILITY_DISABLED = 6;
+ FEATURE_DISABLED = 7;
+ BAD_REQUEST = 8;
+ RESPONSE_TOO_LARGE = 9;
+ CANCELLED = 10;
+ REPLAY_ERROR = 11;
+ DEADLINE_EXCEEDED = 12;
+ }
+ required int32 code = 1;
+ optional string detail = 2;
+}
+
+message Response {
+ optional bytes response = 1;
+ optional bytes exception = 2;
+ optional ApplicationError application_error = 3;
+ optional bytes java_exception = 4;
+ optional RpcError rpc_error = 5;
+}
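
Reviewer note, not part of the vendored file: every App Engine API call travels inside this Request/Response envelope. A hedged sketch of building one with the generated Go types follows; the service and method names are illustrative assumptions, and payload stands for an already-marshaled service-specific proto.

// Sketch only: wrapping a serialized service call in the remote_api envelope.
req := &remote_api.Request{
	ServiceName: proto.String("memcache"), // example service
	Method:      proto.String("Get"),      // example method
	Request:     payload,
}
body, err := proto.Marshal(req) // bytes sent to the API endpoint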
diff --git a/vendor/google.golang.org/appengine/internal/search/search.pb.go b/vendor/google.golang.org/appengine/internal/search/search.pb.go
new file mode 100644
index 000000000..16cd51eee
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/search/search.pb.go
@@ -0,0 +1,2059 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/search/search.proto
+// DO NOT EDIT!
+
+/*
+Package search is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/search/search.proto
+
+It has these top-level messages:
+ Scope
+ Entry
+ AccessControlList
+ FieldValue
+ Field
+ FieldTypes
+ FacetValue
+ Facet
+ Document
+ SearchServiceError
+ RequestStatus
+ IndexSpec
+ IndexMetadata
+ IndexDocumentParams
+ IndexDocumentRequest
+ IndexDocumentResponse
+ DeleteDocumentParams
+ DeleteDocumentRequest
+ DeleteDocumentResponse
+ ListDocumentsParams
+ ListDocumentsRequest
+ ListDocumentsResponse
+ ListIndexesParams
+ ListIndexesRequest
+ ListIndexesResponse
+ DeleteSchemaParams
+ DeleteSchemaRequest
+ DeleteSchemaResponse
+ SortSpec
+ ScorerSpec
+ FieldSpec
+ FacetRange
+ FacetRequestParam
+ FacetAutoDetectParam
+ FacetRequest
+ FacetRefine
+ SearchParams
+ SearchRequest
+ FacetResultValue
+ FacetResult
+ SearchResult
+ SearchResponse
+*/
+package search
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type Scope_Type int32
+
+const (
+ Scope_USER_BY_CANONICAL_ID Scope_Type = 1
+ Scope_USER_BY_EMAIL Scope_Type = 2
+ Scope_GROUP_BY_CANONICAL_ID Scope_Type = 3
+ Scope_GROUP_BY_EMAIL Scope_Type = 4
+ Scope_GROUP_BY_DOMAIN Scope_Type = 5
+ Scope_ALL_USERS Scope_Type = 6
+ Scope_ALL_AUTHENTICATED_USERS Scope_Type = 7
+)
+
+var Scope_Type_name = map[int32]string{
+ 1: "USER_BY_CANONICAL_ID",
+ 2: "USER_BY_EMAIL",
+ 3: "GROUP_BY_CANONICAL_ID",
+ 4: "GROUP_BY_EMAIL",
+ 5: "GROUP_BY_DOMAIN",
+ 6: "ALL_USERS",
+ 7: "ALL_AUTHENTICATED_USERS",
+}
+var Scope_Type_value = map[string]int32{
+ "USER_BY_CANONICAL_ID": 1,
+ "USER_BY_EMAIL": 2,
+ "GROUP_BY_CANONICAL_ID": 3,
+ "GROUP_BY_EMAIL": 4,
+ "GROUP_BY_DOMAIN": 5,
+ "ALL_USERS": 6,
+ "ALL_AUTHENTICATED_USERS": 7,
+}
+
+func (x Scope_Type) Enum() *Scope_Type {
+ p := new(Scope_Type)
+ *p = x
+ return p
+}
+func (x Scope_Type) String() string {
+ return proto.EnumName(Scope_Type_name, int32(x))
+}
+func (x *Scope_Type) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Scope_Type_value, data, "Scope_Type")
+ if err != nil {
+ return err
+ }
+ *x = Scope_Type(value)
+ return nil
+}
+
+type Entry_Permission int32
+
+const (
+ Entry_READ Entry_Permission = 1
+ Entry_WRITE Entry_Permission = 2
+ Entry_FULL_CONTROL Entry_Permission = 3
+)
+
+var Entry_Permission_name = map[int32]string{
+ 1: "READ",
+ 2: "WRITE",
+ 3: "FULL_CONTROL",
+}
+var Entry_Permission_value = map[string]int32{
+ "READ": 1,
+ "WRITE": 2,
+ "FULL_CONTROL": 3,
+}
+
+func (x Entry_Permission) Enum() *Entry_Permission {
+ p := new(Entry_Permission)
+ *p = x
+ return p
+}
+func (x Entry_Permission) String() string {
+ return proto.EnumName(Entry_Permission_name, int32(x))
+}
+func (x *Entry_Permission) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Entry_Permission_value, data, "Entry_Permission")
+ if err != nil {
+ return err
+ }
+ *x = Entry_Permission(value)
+ return nil
+}
+
+type FieldValue_ContentType int32
+
+const (
+ FieldValue_TEXT FieldValue_ContentType = 0
+ FieldValue_HTML FieldValue_ContentType = 1
+ FieldValue_ATOM FieldValue_ContentType = 2
+ FieldValue_DATE FieldValue_ContentType = 3
+ FieldValue_NUMBER FieldValue_ContentType = 4
+ FieldValue_GEO FieldValue_ContentType = 5
+)
+
+var FieldValue_ContentType_name = map[int32]string{
+ 0: "TEXT",
+ 1: "HTML",
+ 2: "ATOM",
+ 3: "DATE",
+ 4: "NUMBER",
+ 5: "GEO",
+}
+var FieldValue_ContentType_value = map[string]int32{
+ "TEXT": 0,
+ "HTML": 1,
+ "ATOM": 2,
+ "DATE": 3,
+ "NUMBER": 4,
+ "GEO": 5,
+}
+
+func (x FieldValue_ContentType) Enum() *FieldValue_ContentType {
+ p := new(FieldValue_ContentType)
+ *p = x
+ return p
+}
+func (x FieldValue_ContentType) String() string {
+ return proto.EnumName(FieldValue_ContentType_name, int32(x))
+}
+func (x *FieldValue_ContentType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldValue_ContentType_value, data, "FieldValue_ContentType")
+ if err != nil {
+ return err
+ }
+ *x = FieldValue_ContentType(value)
+ return nil
+}
+
+type FacetValue_ContentType int32
+
+const (
+ FacetValue_ATOM FacetValue_ContentType = 2
+ FacetValue_DATE FacetValue_ContentType = 3
+ FacetValue_NUMBER FacetValue_ContentType = 4
+)
+
+var FacetValue_ContentType_name = map[int32]string{
+ 2: "ATOM",
+ 3: "DATE",
+ 4: "NUMBER",
+}
+var FacetValue_ContentType_value = map[string]int32{
+ "ATOM": 2,
+ "DATE": 3,
+ "NUMBER": 4,
+}
+
+func (x FacetValue_ContentType) Enum() *FacetValue_ContentType {
+ p := new(FacetValue_ContentType)
+ *p = x
+ return p
+}
+func (x FacetValue_ContentType) String() string {
+ return proto.EnumName(FacetValue_ContentType_name, int32(x))
+}
+func (x *FacetValue_ContentType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FacetValue_ContentType_value, data, "FacetValue_ContentType")
+ if err != nil {
+ return err
+ }
+ *x = FacetValue_ContentType(value)
+ return nil
+}
+
+type Document_Storage int32
+
+const (
+ Document_DISK Document_Storage = 0
+)
+
+var Document_Storage_name = map[int32]string{
+ 0: "DISK",
+}
+var Document_Storage_value = map[string]int32{
+ "DISK": 0,
+}
+
+func (x Document_Storage) Enum() *Document_Storage {
+ p := new(Document_Storage)
+ *p = x
+ return p
+}
+func (x Document_Storage) String() string {
+ return proto.EnumName(Document_Storage_name, int32(x))
+}
+func (x *Document_Storage) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Document_Storage_value, data, "Document_Storage")
+ if err != nil {
+ return err
+ }
+ *x = Document_Storage(value)
+ return nil
+}
+
+type SearchServiceError_ErrorCode int32
+
+const (
+ SearchServiceError_OK SearchServiceError_ErrorCode = 0
+ SearchServiceError_INVALID_REQUEST SearchServiceError_ErrorCode = 1
+ SearchServiceError_TRANSIENT_ERROR SearchServiceError_ErrorCode = 2
+ SearchServiceError_INTERNAL_ERROR SearchServiceError_ErrorCode = 3
+ SearchServiceError_PERMISSION_DENIED SearchServiceError_ErrorCode = 4
+ SearchServiceError_TIMEOUT SearchServiceError_ErrorCode = 5
+ SearchServiceError_CONCURRENT_TRANSACTION SearchServiceError_ErrorCode = 6
+)
+
+var SearchServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_REQUEST",
+ 2: "TRANSIENT_ERROR",
+ 3: "INTERNAL_ERROR",
+ 4: "PERMISSION_DENIED",
+ 5: "TIMEOUT",
+ 6: "CONCURRENT_TRANSACTION",
+}
+var SearchServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_REQUEST": 1,
+ "TRANSIENT_ERROR": 2,
+ "INTERNAL_ERROR": 3,
+ "PERMISSION_DENIED": 4,
+ "TIMEOUT": 5,
+ "CONCURRENT_TRANSACTION": 6,
+}
+
+func (x SearchServiceError_ErrorCode) Enum() *SearchServiceError_ErrorCode {
+ p := new(SearchServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x SearchServiceError_ErrorCode) String() string {
+ return proto.EnumName(SearchServiceError_ErrorCode_name, int32(x))
+}
+func (x *SearchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SearchServiceError_ErrorCode_value, data, "SearchServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = SearchServiceError_ErrorCode(value)
+ return nil
+}
+
+type IndexSpec_Consistency int32
+
+const (
+ IndexSpec_GLOBAL IndexSpec_Consistency = 0
+ IndexSpec_PER_DOCUMENT IndexSpec_Consistency = 1
+)
+
+var IndexSpec_Consistency_name = map[int32]string{
+ 0: "GLOBAL",
+ 1: "PER_DOCUMENT",
+}
+var IndexSpec_Consistency_value = map[string]int32{
+ "GLOBAL": 0,
+ "PER_DOCUMENT": 1,
+}
+
+func (x IndexSpec_Consistency) Enum() *IndexSpec_Consistency {
+ p := new(IndexSpec_Consistency)
+ *p = x
+ return p
+}
+func (x IndexSpec_Consistency) String() string {
+ return proto.EnumName(IndexSpec_Consistency_name, int32(x))
+}
+func (x *IndexSpec_Consistency) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IndexSpec_Consistency_value, data, "IndexSpec_Consistency")
+ if err != nil {
+ return err
+ }
+ *x = IndexSpec_Consistency(value)
+ return nil
+}
+
+type IndexSpec_Source int32
+
+const (
+ IndexSpec_SEARCH IndexSpec_Source = 0
+ IndexSpec_DATASTORE IndexSpec_Source = 1
+ IndexSpec_CLOUD_STORAGE IndexSpec_Source = 2
+)
+
+var IndexSpec_Source_name = map[int32]string{
+ 0: "SEARCH",
+ 1: "DATASTORE",
+ 2: "CLOUD_STORAGE",
+}
+var IndexSpec_Source_value = map[string]int32{
+ "SEARCH": 0,
+ "DATASTORE": 1,
+ "CLOUD_STORAGE": 2,
+}
+
+func (x IndexSpec_Source) Enum() *IndexSpec_Source {
+ p := new(IndexSpec_Source)
+ *p = x
+ return p
+}
+func (x IndexSpec_Source) String() string {
+ return proto.EnumName(IndexSpec_Source_name, int32(x))
+}
+func (x *IndexSpec_Source) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IndexSpec_Source_value, data, "IndexSpec_Source")
+ if err != nil {
+ return err
+ }
+ *x = IndexSpec_Source(value)
+ return nil
+}
+
+type IndexSpec_Mode int32
+
+const (
+ IndexSpec_PRIORITY IndexSpec_Mode = 0
+ IndexSpec_BACKGROUND IndexSpec_Mode = 1
+)
+
+var IndexSpec_Mode_name = map[int32]string{
+ 0: "PRIORITY",
+ 1: "BACKGROUND",
+}
+var IndexSpec_Mode_value = map[string]int32{
+ "PRIORITY": 0,
+ "BACKGROUND": 1,
+}
+
+func (x IndexSpec_Mode) Enum() *IndexSpec_Mode {
+ p := new(IndexSpec_Mode)
+ *p = x
+ return p
+}
+func (x IndexSpec_Mode) String() string {
+ return proto.EnumName(IndexSpec_Mode_name, int32(x))
+}
+func (x *IndexSpec_Mode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IndexSpec_Mode_value, data, "IndexSpec_Mode")
+ if err != nil {
+ return err
+ }
+ *x = IndexSpec_Mode(value)
+ return nil
+}
+
+type IndexDocumentParams_Freshness int32
+
+const (
+ IndexDocumentParams_SYNCHRONOUSLY IndexDocumentParams_Freshness = 0
+ IndexDocumentParams_WHEN_CONVENIENT IndexDocumentParams_Freshness = 1
+)
+
+var IndexDocumentParams_Freshness_name = map[int32]string{
+ 0: "SYNCHRONOUSLY",
+ 1: "WHEN_CONVENIENT",
+}
+var IndexDocumentParams_Freshness_value = map[string]int32{
+ "SYNCHRONOUSLY": 0,
+ "WHEN_CONVENIENT": 1,
+}
+
+func (x IndexDocumentParams_Freshness) Enum() *IndexDocumentParams_Freshness {
+ p := new(IndexDocumentParams_Freshness)
+ *p = x
+ return p
+}
+func (x IndexDocumentParams_Freshness) String() string {
+ return proto.EnumName(IndexDocumentParams_Freshness_name, int32(x))
+}
+func (x *IndexDocumentParams_Freshness) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IndexDocumentParams_Freshness_value, data, "IndexDocumentParams_Freshness")
+ if err != nil {
+ return err
+ }
+ *x = IndexDocumentParams_Freshness(value)
+ return nil
+}
+
+type ScorerSpec_Scorer int32
+
+const (
+ ScorerSpec_RESCORING_MATCH_SCORER ScorerSpec_Scorer = 0
+ ScorerSpec_MATCH_SCORER ScorerSpec_Scorer = 2
+)
+
+var ScorerSpec_Scorer_name = map[int32]string{
+ 0: "RESCORING_MATCH_SCORER",
+ 2: "MATCH_SCORER",
+}
+var ScorerSpec_Scorer_value = map[string]int32{
+ "RESCORING_MATCH_SCORER": 0,
+ "MATCH_SCORER": 2,
+}
+
+func (x ScorerSpec_Scorer) Enum() *ScorerSpec_Scorer {
+ p := new(ScorerSpec_Scorer)
+ *p = x
+ return p
+}
+func (x ScorerSpec_Scorer) String() string {
+ return proto.EnumName(ScorerSpec_Scorer_name, int32(x))
+}
+func (x *ScorerSpec_Scorer) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ScorerSpec_Scorer_value, data, "ScorerSpec_Scorer")
+ if err != nil {
+ return err
+ }
+ *x = ScorerSpec_Scorer(value)
+ return nil
+}
+
+type SearchParams_CursorType int32
+
+const (
+ SearchParams_NONE SearchParams_CursorType = 0
+ SearchParams_SINGLE SearchParams_CursorType = 1
+ SearchParams_PER_RESULT SearchParams_CursorType = 2
+)
+
+var SearchParams_CursorType_name = map[int32]string{
+ 0: "NONE",
+ 1: "SINGLE",
+ 2: "PER_RESULT",
+}
+var SearchParams_CursorType_value = map[string]int32{
+ "NONE": 0,
+ "SINGLE": 1,
+ "PER_RESULT": 2,
+}
+
+func (x SearchParams_CursorType) Enum() *SearchParams_CursorType {
+ p := new(SearchParams_CursorType)
+ *p = x
+ return p
+}
+func (x SearchParams_CursorType) String() string {
+ return proto.EnumName(SearchParams_CursorType_name, int32(x))
+}
+func (x *SearchParams_CursorType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SearchParams_CursorType_value, data, "SearchParams_CursorType")
+ if err != nil {
+ return err
+ }
+ *x = SearchParams_CursorType(value)
+ return nil
+}
+
+type SearchParams_ParsingMode int32
+
+const (
+ SearchParams_STRICT SearchParams_ParsingMode = 0
+ SearchParams_RELAXED SearchParams_ParsingMode = 1
+)
+
+var SearchParams_ParsingMode_name = map[int32]string{
+ 0: "STRICT",
+ 1: "RELAXED",
+}
+var SearchParams_ParsingMode_value = map[string]int32{
+ "STRICT": 0,
+ "RELAXED": 1,
+}
+
+func (x SearchParams_ParsingMode) Enum() *SearchParams_ParsingMode {
+ p := new(SearchParams_ParsingMode)
+ *p = x
+ return p
+}
+func (x SearchParams_ParsingMode) String() string {
+ return proto.EnumName(SearchParams_ParsingMode_name, int32(x))
+}
+func (x *SearchParams_ParsingMode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SearchParams_ParsingMode_value, data, "SearchParams_ParsingMode")
+ if err != nil {
+ return err
+ }
+ *x = SearchParams_ParsingMode(value)
+ return nil
+}
+
+type Scope struct {
+ Type *Scope_Type `protobuf:"varint,1,opt,name=type,enum=search.Scope_Type" json:"type,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Scope) Reset() { *m = Scope{} }
+func (m *Scope) String() string { return proto.CompactTextString(m) }
+func (*Scope) ProtoMessage() {}
+
+func (m *Scope) GetType() Scope_Type {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Scope_USER_BY_CANONICAL_ID
+}
+
+func (m *Scope) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Entry struct {
+ Scope *Scope `protobuf:"bytes,1,opt,name=scope" json:"scope,omitempty"`
+ Permission *Entry_Permission `protobuf:"varint,2,opt,name=permission,enum=search.Entry_Permission" json:"permission,omitempty"`
+ DisplayName *string `protobuf:"bytes,3,opt,name=display_name" json:"display_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Entry) Reset() { *m = Entry{} }
+func (m *Entry) String() string { return proto.CompactTextString(m) }
+func (*Entry) ProtoMessage() {}
+
+func (m *Entry) GetScope() *Scope {
+ if m != nil {
+ return m.Scope
+ }
+ return nil
+}
+
+func (m *Entry) GetPermission() Entry_Permission {
+ if m != nil && m.Permission != nil {
+ return *m.Permission
+ }
+ return Entry_READ
+}
+
+func (m *Entry) GetDisplayName() string {
+ if m != nil && m.DisplayName != nil {
+ return *m.DisplayName
+ }
+ return ""
+}
+
+type AccessControlList struct {
+ Owner *string `protobuf:"bytes,1,opt,name=owner" json:"owner,omitempty"`
+ Entries []*Entry `protobuf:"bytes,2,rep,name=entries" json:"entries,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AccessControlList) Reset() { *m = AccessControlList{} }
+func (m *AccessControlList) String() string { return proto.CompactTextString(m) }
+func (*AccessControlList) ProtoMessage() {}
+
+func (m *AccessControlList) GetOwner() string {
+ if m != nil && m.Owner != nil {
+ return *m.Owner
+ }
+ return ""
+}
+
+func (m *AccessControlList) GetEntries() []*Entry {
+ if m != nil {
+ return m.Entries
+ }
+ return nil
+}
+
+type FieldValue struct {
+ Type *FieldValue_ContentType `protobuf:"varint,1,opt,name=type,enum=search.FieldValue_ContentType,def=0" json:"type,omitempty"`
+ Language *string `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"`
+ StringValue *string `protobuf:"bytes,3,opt,name=string_value" json:"string_value,omitempty"`
+ Geo *FieldValue_Geo `protobuf:"group,4,opt" json:"geo,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldValue) Reset() { *m = FieldValue{} }
+func (m *FieldValue) String() string { return proto.CompactTextString(m) }
+func (*FieldValue) ProtoMessage() {}
+
+const Default_FieldValue_Type FieldValue_ContentType = FieldValue_TEXT
+const Default_FieldValue_Language string = "en"
+
+func (m *FieldValue) GetType() FieldValue_ContentType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_FieldValue_Type
+}
+
+func (m *FieldValue) GetLanguage() string {
+ if m != nil && m.Language != nil {
+ return *m.Language
+ }
+ return Default_FieldValue_Language
+}
+
+func (m *FieldValue) GetStringValue() string {
+ if m != nil && m.StringValue != nil {
+ return *m.StringValue
+ }
+ return ""
+}
+
+func (m *FieldValue) GetGeo() *FieldValue_Geo {
+ if m != nil {
+ return m.Geo
+ }
+ return nil
+}
+
+type FieldValue_Geo struct {
+ Lat *float64 `protobuf:"fixed64,5,req,name=lat" json:"lat,omitempty"`
+ Lng *float64 `protobuf:"fixed64,6,req,name=lng" json:"lng,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldValue_Geo) Reset() { *m = FieldValue_Geo{} }
+func (m *FieldValue_Geo) String() string { return proto.CompactTextString(m) }
+func (*FieldValue_Geo) ProtoMessage() {}
+
+func (m *FieldValue_Geo) GetLat() float64 {
+ if m != nil && m.Lat != nil {
+ return *m.Lat
+ }
+ return 0
+}
+
+func (m *FieldValue_Geo) GetLng() float64 {
+ if m != nil && m.Lng != nil {
+ return *m.Lng
+ }
+ return 0
+}
+
+type Field struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value *FieldValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Field) Reset() { *m = Field{} }
+func (m *Field) String() string { return proto.CompactTextString(m) }
+func (*Field) ProtoMessage() {}
+
+func (m *Field) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Field) GetValue() *FieldValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type FieldTypes struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Type []FieldValue_ContentType `protobuf:"varint,2,rep,name=type,enum=search.FieldValue_ContentType" json:"type,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldTypes) Reset() { *m = FieldTypes{} }
+func (m *FieldTypes) String() string { return proto.CompactTextString(m) }
+func (*FieldTypes) ProtoMessage() {}
+
+func (m *FieldTypes) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FieldTypes) GetType() []FieldValue_ContentType {
+ if m != nil {
+ return m.Type
+ }
+ return nil
+}
+
+type FacetValue struct {
+ Type *FacetValue_ContentType `protobuf:"varint,1,opt,name=type,enum=search.FacetValue_ContentType,def=2" json:"type,omitempty"`
+ StringValue *string `protobuf:"bytes,3,opt,name=string_value" json:"string_value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetValue) Reset() { *m = FacetValue{} }
+func (m *FacetValue) String() string { return proto.CompactTextString(m) }
+func (*FacetValue) ProtoMessage() {}
+
+const Default_FacetValue_Type FacetValue_ContentType = FacetValue_ATOM
+
+func (m *FacetValue) GetType() FacetValue_ContentType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_FacetValue_Type
+}
+
+func (m *FacetValue) GetStringValue() string {
+ if m != nil && m.StringValue != nil {
+ return *m.StringValue
+ }
+ return ""
+}
+
+type Facet struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value *FacetValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Facet) Reset() { *m = Facet{} }
+func (m *Facet) String() string { return proto.CompactTextString(m) }
+func (*Facet) ProtoMessage() {}
+
+func (m *Facet) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Facet) GetValue() *FacetValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Document struct {
+ Id *string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+ Language *string `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"`
+ Field []*Field `protobuf:"bytes,3,rep,name=field" json:"field,omitempty"`
+ OrderId *int32 `protobuf:"varint,4,opt,name=order_id" json:"order_id,omitempty"`
+ Storage *Document_Storage `protobuf:"varint,5,opt,name=storage,enum=search.Document_Storage,def=0" json:"storage,omitempty"`
+ Acl *AccessControlList `protobuf:"bytes,6,opt,name=acl" json:"acl,omitempty"`
+ Version *int64 `protobuf:"varint,7,opt,name=version" json:"version,omitempty"`
+ Facet []*Facet `protobuf:"bytes,8,rep,name=facet" json:"facet,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Document) Reset() { *m = Document{} }
+func (m *Document) String() string { return proto.CompactTextString(m) }
+func (*Document) ProtoMessage() {}
+
+const Default_Document_Language string = "en"
+const Default_Document_Storage Document_Storage = Document_DISK
+
+func (m *Document) GetId() string {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return ""
+}
+
+func (m *Document) GetLanguage() string {
+ if m != nil && m.Language != nil {
+ return *m.Language
+ }
+ return Default_Document_Language
+}
+
+func (m *Document) GetField() []*Field {
+ if m != nil {
+ return m.Field
+ }
+ return nil
+}
+
+func (m *Document) GetOrderId() int32 {
+ if m != nil && m.OrderId != nil {
+ return *m.OrderId
+ }
+ return 0
+}
+
+func (m *Document) GetStorage() Document_Storage {
+ if m != nil && m.Storage != nil {
+ return *m.Storage
+ }
+ return Default_Document_Storage
+}
+
+func (m *Document) GetAcl() *AccessControlList {
+ if m != nil {
+ return m.Acl
+ }
+ return nil
+}
+
+func (m *Document) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+func (m *Document) GetFacet() []*Facet {
+ if m != nil {
+ return m.Facet
+ }
+ return nil
+}
+
+type SearchServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchServiceError) Reset() { *m = SearchServiceError{} }
+func (m *SearchServiceError) String() string { return proto.CompactTextString(m) }
+func (*SearchServiceError) ProtoMessage() {}
+
+type RequestStatus struct {
+ Code *SearchServiceError_ErrorCode `protobuf:"varint,1,req,name=code,enum=search.SearchServiceError_ErrorCode" json:"code,omitempty"`
+ ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail" json:"error_detail,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RequestStatus) Reset() { *m = RequestStatus{} }
+func (m *RequestStatus) String() string { return proto.CompactTextString(m) }
+func (*RequestStatus) ProtoMessage() {}
+
+func (m *RequestStatus) GetCode() SearchServiceError_ErrorCode {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return SearchServiceError_OK
+}
+
+func (m *RequestStatus) GetErrorDetail() string {
+ if m != nil && m.ErrorDetail != nil {
+ return *m.ErrorDetail
+ }
+ return ""
+}
+
+type IndexSpec struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Consistency *IndexSpec_Consistency `protobuf:"varint,2,opt,name=consistency,enum=search.IndexSpec_Consistency,def=1" json:"consistency,omitempty"`
+ Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"`
+ Version *int32 `protobuf:"varint,4,opt,name=version" json:"version,omitempty"`
+ Source *IndexSpec_Source `protobuf:"varint,5,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"`
+ Mode *IndexSpec_Mode `protobuf:"varint,6,opt,name=mode,enum=search.IndexSpec_Mode,def=0" json:"mode,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexSpec) Reset() { *m = IndexSpec{} }
+func (m *IndexSpec) String() string { return proto.CompactTextString(m) }
+func (*IndexSpec) ProtoMessage() {}
+
+const Default_IndexSpec_Consistency IndexSpec_Consistency = IndexSpec_PER_DOCUMENT
+const Default_IndexSpec_Source IndexSpec_Source = IndexSpec_SEARCH
+const Default_IndexSpec_Mode IndexSpec_Mode = IndexSpec_PRIORITY
+
+func (m *IndexSpec) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *IndexSpec) GetConsistency() IndexSpec_Consistency {
+ if m != nil && m.Consistency != nil {
+ return *m.Consistency
+ }
+ return Default_IndexSpec_Consistency
+}
+
+func (m *IndexSpec) GetNamespace() string {
+ if m != nil && m.Namespace != nil {
+ return *m.Namespace
+ }
+ return ""
+}
+
+func (m *IndexSpec) GetVersion() int32 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+func (m *IndexSpec) GetSource() IndexSpec_Source {
+ if m != nil && m.Source != nil {
+ return *m.Source
+ }
+ return Default_IndexSpec_Source
+}
+
+func (m *IndexSpec) GetMode() IndexSpec_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_IndexSpec_Mode
+}
+
+type IndexMetadata struct {
+ IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"`
+ Field []*FieldTypes `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
+ Storage *IndexMetadata_Storage `protobuf:"bytes,3,opt,name=storage" json:"storage,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexMetadata) Reset() { *m = IndexMetadata{} }
+func (m *IndexMetadata) String() string { return proto.CompactTextString(m) }
+func (*IndexMetadata) ProtoMessage() {}
+
+func (m *IndexMetadata) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+func (m *IndexMetadata) GetField() []*FieldTypes {
+ if m != nil {
+ return m.Field
+ }
+ return nil
+}
+
+func (m *IndexMetadata) GetStorage() *IndexMetadata_Storage {
+ if m != nil {
+ return m.Storage
+ }
+ return nil
+}
+
+type IndexMetadata_Storage struct {
+ AmountUsed *int64 `protobuf:"varint,1,opt,name=amount_used" json:"amount_used,omitempty"`
+ Limit *int64 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexMetadata_Storage) Reset() { *m = IndexMetadata_Storage{} }
+func (m *IndexMetadata_Storage) String() string { return proto.CompactTextString(m) }
+func (*IndexMetadata_Storage) ProtoMessage() {}
+
+func (m *IndexMetadata_Storage) GetAmountUsed() int64 {
+ if m != nil && m.AmountUsed != nil {
+ return *m.AmountUsed
+ }
+ return 0
+}
+
+func (m *IndexMetadata_Storage) GetLimit() int64 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+type IndexDocumentParams struct {
+ Document []*Document `protobuf:"bytes,1,rep,name=document" json:"document,omitempty"`
+ Freshness *IndexDocumentParams_Freshness `protobuf:"varint,2,opt,name=freshness,enum=search.IndexDocumentParams_Freshness,def=0" json:"freshness,omitempty"`
+ IndexSpec *IndexSpec `protobuf:"bytes,3,req,name=index_spec" json:"index_spec,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexDocumentParams) Reset() { *m = IndexDocumentParams{} }
+func (m *IndexDocumentParams) String() string { return proto.CompactTextString(m) }
+func (*IndexDocumentParams) ProtoMessage() {}
+
+const Default_IndexDocumentParams_Freshness IndexDocumentParams_Freshness = IndexDocumentParams_SYNCHRONOUSLY
+
+func (m *IndexDocumentParams) GetDocument() []*Document {
+ if m != nil {
+ return m.Document
+ }
+ return nil
+}
+
+func (m *IndexDocumentParams) GetFreshness() IndexDocumentParams_Freshness {
+ if m != nil && m.Freshness != nil {
+ return *m.Freshness
+ }
+ return Default_IndexDocumentParams_Freshness
+}
+
+func (m *IndexDocumentParams) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+type IndexDocumentRequest struct {
+ Params *IndexDocumentParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexDocumentRequest) Reset() { *m = IndexDocumentRequest{} }
+func (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) }
+func (*IndexDocumentRequest) ProtoMessage() {}
+
+func (m *IndexDocumentRequest) GetParams() *IndexDocumentParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *IndexDocumentRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type IndexDocumentResponse struct {
+ Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"`
+ DocId []string `protobuf:"bytes,2,rep,name=doc_id" json:"doc_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexDocumentResponse) Reset() { *m = IndexDocumentResponse{} }
+func (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) }
+func (*IndexDocumentResponse) ProtoMessage() {}
+
+func (m *IndexDocumentResponse) GetStatus() []*RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *IndexDocumentResponse) GetDocId() []string {
+ if m != nil {
+ return m.DocId
+ }
+ return nil
+}
+
+type DeleteDocumentParams struct {
+ DocId []string `protobuf:"bytes,1,rep,name=doc_id" json:"doc_id,omitempty"`
+ IndexSpec *IndexSpec `protobuf:"bytes,2,req,name=index_spec" json:"index_spec,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteDocumentParams) Reset() { *m = DeleteDocumentParams{} }
+func (m *DeleteDocumentParams) String() string { return proto.CompactTextString(m) }
+func (*DeleteDocumentParams) ProtoMessage() {}
+
+func (m *DeleteDocumentParams) GetDocId() []string {
+ if m != nil {
+ return m.DocId
+ }
+ return nil
+}
+
+func (m *DeleteDocumentParams) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+type DeleteDocumentRequest struct {
+ Params *DeleteDocumentParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteDocumentRequest) Reset() { *m = DeleteDocumentRequest{} }
+func (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteDocumentRequest) ProtoMessage() {}
+
+func (m *DeleteDocumentRequest) GetParams() *DeleteDocumentParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *DeleteDocumentRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type DeleteDocumentResponse struct {
+ Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteDocumentResponse) Reset() { *m = DeleteDocumentResponse{} }
+func (m *DeleteDocumentResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteDocumentResponse) ProtoMessage() {}
+
+func (m *DeleteDocumentResponse) GetStatus() []*RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+type ListDocumentsParams struct {
+ IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"`
+ StartDocId *string `protobuf:"bytes,2,opt,name=start_doc_id" json:"start_doc_id,omitempty"`
+ IncludeStartDoc *bool `protobuf:"varint,3,opt,name=include_start_doc,def=1" json:"include_start_doc,omitempty"`
+ Limit *int32 `protobuf:"varint,4,opt,name=limit,def=100" json:"limit,omitempty"`
+ KeysOnly *bool `protobuf:"varint,5,opt,name=keys_only" json:"keys_only,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListDocumentsParams) Reset() { *m = ListDocumentsParams{} }
+func (m *ListDocumentsParams) String() string { return proto.CompactTextString(m) }
+func (*ListDocumentsParams) ProtoMessage() {}
+
+const Default_ListDocumentsParams_IncludeStartDoc bool = true
+const Default_ListDocumentsParams_Limit int32 = 100
+
+func (m *ListDocumentsParams) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+func (m *ListDocumentsParams) GetStartDocId() string {
+ if m != nil && m.StartDocId != nil {
+ return *m.StartDocId
+ }
+ return ""
+}
+
+func (m *ListDocumentsParams) GetIncludeStartDoc() bool {
+ if m != nil && m.IncludeStartDoc != nil {
+ return *m.IncludeStartDoc
+ }
+ return Default_ListDocumentsParams_IncludeStartDoc
+}
+
+func (m *ListDocumentsParams) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return Default_ListDocumentsParams_Limit
+}
+
+func (m *ListDocumentsParams) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+type ListDocumentsRequest struct {
+ Params *ListDocumentsParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,2,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListDocumentsRequest) Reset() { *m = ListDocumentsRequest{} }
+func (m *ListDocumentsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListDocumentsRequest) ProtoMessage() {}
+
+func (m *ListDocumentsRequest) GetParams() *ListDocumentsParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *ListDocumentsRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type ListDocumentsResponse struct {
+ Status *RequestStatus `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+ Document []*Document `protobuf:"bytes,2,rep,name=document" json:"document,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListDocumentsResponse) Reset() { *m = ListDocumentsResponse{} }
+func (m *ListDocumentsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListDocumentsResponse) ProtoMessage() {}
+
+func (m *ListDocumentsResponse) GetStatus() *RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *ListDocumentsResponse) GetDocument() []*Document {
+ if m != nil {
+ return m.Document
+ }
+ return nil
+}
+
+type ListIndexesParams struct {
+ FetchSchema *bool `protobuf:"varint,1,opt,name=fetch_schema" json:"fetch_schema,omitempty"`
+ Limit *int32 `protobuf:"varint,2,opt,name=limit,def=20" json:"limit,omitempty"`
+ Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"`
+ StartIndexName *string `protobuf:"bytes,4,opt,name=start_index_name" json:"start_index_name,omitempty"`
+ IncludeStartIndex *bool `protobuf:"varint,5,opt,name=include_start_index,def=1" json:"include_start_index,omitempty"`
+ IndexNamePrefix *string `protobuf:"bytes,6,opt,name=index_name_prefix" json:"index_name_prefix,omitempty"`
+ Offset *int32 `protobuf:"varint,7,opt,name=offset" json:"offset,omitempty"`
+ Source *IndexSpec_Source `protobuf:"varint,8,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListIndexesParams) Reset() { *m = ListIndexesParams{} }
+func (m *ListIndexesParams) String() string { return proto.CompactTextString(m) }
+func (*ListIndexesParams) ProtoMessage() {}
+
+const Default_ListIndexesParams_Limit int32 = 20
+const Default_ListIndexesParams_IncludeStartIndex bool = true
+const Default_ListIndexesParams_Source IndexSpec_Source = IndexSpec_SEARCH
+
+func (m *ListIndexesParams) GetFetchSchema() bool {
+ if m != nil && m.FetchSchema != nil {
+ return *m.FetchSchema
+ }
+ return false
+}
+
+func (m *ListIndexesParams) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return Default_ListIndexesParams_Limit
+}
+
+func (m *ListIndexesParams) GetNamespace() string {
+ if m != nil && m.Namespace != nil {
+ return *m.Namespace
+ }
+ return ""
+}
+
+func (m *ListIndexesParams) GetStartIndexName() string {
+ if m != nil && m.StartIndexName != nil {
+ return *m.StartIndexName
+ }
+ return ""
+}
+
+func (m *ListIndexesParams) GetIncludeStartIndex() bool {
+ if m != nil && m.IncludeStartIndex != nil {
+ return *m.IncludeStartIndex
+ }
+ return Default_ListIndexesParams_IncludeStartIndex
+}
+
+func (m *ListIndexesParams) GetIndexNamePrefix() string {
+ if m != nil && m.IndexNamePrefix != nil {
+ return *m.IndexNamePrefix
+ }
+ return ""
+}
+
+func (m *ListIndexesParams) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return 0
+}
+
+func (m *ListIndexesParams) GetSource() IndexSpec_Source {
+ if m != nil && m.Source != nil {
+ return *m.Source
+ }
+ return Default_ListIndexesParams_Source
+}
+
+type ListIndexesRequest struct {
+ Params *ListIndexesParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListIndexesRequest) Reset() { *m = ListIndexesRequest{} }
+func (m *ListIndexesRequest) String() string { return proto.CompactTextString(m) }
+func (*ListIndexesRequest) ProtoMessage() {}
+
+func (m *ListIndexesRequest) GetParams() *ListIndexesParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *ListIndexesRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type ListIndexesResponse struct {
+ Status *RequestStatus `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+ IndexMetadata []*IndexMetadata `protobuf:"bytes,2,rep,name=index_metadata" json:"index_metadata,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListIndexesResponse) Reset() { *m = ListIndexesResponse{} }
+func (m *ListIndexesResponse) String() string { return proto.CompactTextString(m) }
+func (*ListIndexesResponse) ProtoMessage() {}
+
+func (m *ListIndexesResponse) GetStatus() *RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *ListIndexesResponse) GetIndexMetadata() []*IndexMetadata {
+ if m != nil {
+ return m.IndexMetadata
+ }
+ return nil
+}
+
+type DeleteSchemaParams struct {
+ Source *IndexSpec_Source `protobuf:"varint,1,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"`
+ IndexSpec []*IndexSpec `protobuf:"bytes,2,rep,name=index_spec" json:"index_spec,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteSchemaParams) Reset() { *m = DeleteSchemaParams{} }
+func (m *DeleteSchemaParams) String() string { return proto.CompactTextString(m) }
+func (*DeleteSchemaParams) ProtoMessage() {}
+
+const Default_DeleteSchemaParams_Source IndexSpec_Source = IndexSpec_SEARCH
+
+func (m *DeleteSchemaParams) GetSource() IndexSpec_Source {
+ if m != nil && m.Source != nil {
+ return *m.Source
+ }
+ return Default_DeleteSchemaParams_Source
+}
+
+func (m *DeleteSchemaParams) GetIndexSpec() []*IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+type DeleteSchemaRequest struct {
+ Params *DeleteSchemaParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteSchemaRequest) Reset() { *m = DeleteSchemaRequest{} }
+func (m *DeleteSchemaRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteSchemaRequest) ProtoMessage() {}
+
+func (m *DeleteSchemaRequest) GetParams() *DeleteSchemaParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *DeleteSchemaRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type DeleteSchemaResponse struct {
+ Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteSchemaResponse) Reset() { *m = DeleteSchemaResponse{} }
+func (m *DeleteSchemaResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteSchemaResponse) ProtoMessage() {}
+
+func (m *DeleteSchemaResponse) GetStatus() []*RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+type SortSpec struct {
+ SortExpression *string `protobuf:"bytes,1,req,name=sort_expression" json:"sort_expression,omitempty"`
+ SortDescending *bool `protobuf:"varint,2,opt,name=sort_descending,def=1" json:"sort_descending,omitempty"`
+ DefaultValueText *string `protobuf:"bytes,4,opt,name=default_value_text" json:"default_value_text,omitempty"`
+ DefaultValueNumeric *float64 `protobuf:"fixed64,5,opt,name=default_value_numeric" json:"default_value_numeric,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SortSpec) Reset() { *m = SortSpec{} }
+func (m *SortSpec) String() string { return proto.CompactTextString(m) }
+func (*SortSpec) ProtoMessage() {}
+
+const Default_SortSpec_SortDescending bool = true
+
+func (m *SortSpec) GetSortExpression() string {
+ if m != nil && m.SortExpression != nil {
+ return *m.SortExpression
+ }
+ return ""
+}
+
+func (m *SortSpec) GetSortDescending() bool {
+ if m != nil && m.SortDescending != nil {
+ return *m.SortDescending
+ }
+ return Default_SortSpec_SortDescending
+}
+
+func (m *SortSpec) GetDefaultValueText() string {
+ if m != nil && m.DefaultValueText != nil {
+ return *m.DefaultValueText
+ }
+ return ""
+}
+
+func (m *SortSpec) GetDefaultValueNumeric() float64 {
+ if m != nil && m.DefaultValueNumeric != nil {
+ return *m.DefaultValueNumeric
+ }
+ return 0
+}
+
+type ScorerSpec struct {
+ Scorer *ScorerSpec_Scorer `protobuf:"varint,1,opt,name=scorer,enum=search.ScorerSpec_Scorer,def=2" json:"scorer,omitempty"`
+ Limit *int32 `protobuf:"varint,2,opt,name=limit,def=1000" json:"limit,omitempty"`
+ MatchScorerParameters *string `protobuf:"bytes,9,opt,name=match_scorer_parameters" json:"match_scorer_parameters,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ScorerSpec) Reset() { *m = ScorerSpec{} }
+func (m *ScorerSpec) String() string { return proto.CompactTextString(m) }
+func (*ScorerSpec) ProtoMessage() {}
+
+const Default_ScorerSpec_Scorer ScorerSpec_Scorer = ScorerSpec_MATCH_SCORER
+const Default_ScorerSpec_Limit int32 = 1000
+
+func (m *ScorerSpec) GetScorer() ScorerSpec_Scorer {
+ if m != nil && m.Scorer != nil {
+ return *m.Scorer
+ }
+ return Default_ScorerSpec_Scorer
+}
+
+func (m *ScorerSpec) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return Default_ScorerSpec_Limit
+}
+
+func (m *ScorerSpec) GetMatchScorerParameters() string {
+ if m != nil && m.MatchScorerParameters != nil {
+ return *m.MatchScorerParameters
+ }
+ return ""
+}
+
+type FieldSpec struct {
+ Name []string `protobuf:"bytes,1,rep,name=name" json:"name,omitempty"`
+ Expression []*FieldSpec_Expression `protobuf:"group,2,rep" json:"expression,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldSpec) Reset() { *m = FieldSpec{} }
+func (m *FieldSpec) String() string { return proto.CompactTextString(m) }
+func (*FieldSpec) ProtoMessage() {}
+
+func (m *FieldSpec) GetName() []string {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *FieldSpec) GetExpression() []*FieldSpec_Expression {
+ if m != nil {
+ return m.Expression
+ }
+ return nil
+}
+
+type FieldSpec_Expression struct {
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Expression *string `protobuf:"bytes,4,req,name=expression" json:"expression,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldSpec_Expression) Reset() { *m = FieldSpec_Expression{} }
+func (m *FieldSpec_Expression) String() string { return proto.CompactTextString(m) }
+func (*FieldSpec_Expression) ProtoMessage() {}
+
+func (m *FieldSpec_Expression) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FieldSpec_Expression) GetExpression() string {
+ if m != nil && m.Expression != nil {
+ return *m.Expression
+ }
+ return ""
+}
+
+type FacetRange struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Start *string `protobuf:"bytes,2,opt,name=start" json:"start,omitempty"`
+ End *string `protobuf:"bytes,3,opt,name=end" json:"end,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRange) Reset() { *m = FacetRange{} }
+func (m *FacetRange) String() string { return proto.CompactTextString(m) }
+func (*FacetRange) ProtoMessage() {}
+
+func (m *FacetRange) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetRange) GetStart() string {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return ""
+}
+
+func (m *FacetRange) GetEnd() string {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return ""
+}
+
+type FacetRequestParam struct {
+ ValueLimit *int32 `protobuf:"varint,1,opt,name=value_limit" json:"value_limit,omitempty"`
+ Range []*FacetRange `protobuf:"bytes,2,rep,name=range" json:"range,omitempty"`
+ ValueConstraint []string `protobuf:"bytes,3,rep,name=value_constraint" json:"value_constraint,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRequestParam) Reset() { *m = FacetRequestParam{} }
+func (m *FacetRequestParam) String() string { return proto.CompactTextString(m) }
+func (*FacetRequestParam) ProtoMessage() {}
+
+func (m *FacetRequestParam) GetValueLimit() int32 {
+ if m != nil && m.ValueLimit != nil {
+ return *m.ValueLimit
+ }
+ return 0
+}
+
+func (m *FacetRequestParam) GetRange() []*FacetRange {
+ if m != nil {
+ return m.Range
+ }
+ return nil
+}
+
+func (m *FacetRequestParam) GetValueConstraint() []string {
+ if m != nil {
+ return m.ValueConstraint
+ }
+ return nil
+}
+
+type FacetAutoDetectParam struct {
+ ValueLimit *int32 `protobuf:"varint,1,opt,name=value_limit,def=10" json:"value_limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetAutoDetectParam) Reset() { *m = FacetAutoDetectParam{} }
+func (m *FacetAutoDetectParam) String() string { return proto.CompactTextString(m) }
+func (*FacetAutoDetectParam) ProtoMessage() {}
+
+const Default_FacetAutoDetectParam_ValueLimit int32 = 10
+
+func (m *FacetAutoDetectParam) GetValueLimit() int32 {
+ if m != nil && m.ValueLimit != nil {
+ return *m.ValueLimit
+ }
+ return Default_FacetAutoDetectParam_ValueLimit
+}
+
+type FacetRequest struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Type *FacetValue_ContentType `protobuf:"varint,2,req,name=type,enum=search.FacetValue_ContentType" json:"type,omitempty"`
+ Params *FacetRequestParam `protobuf:"bytes,3,opt,name=params" json:"params,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRequest) Reset() { *m = FacetRequest{} }
+func (m *FacetRequest) String() string { return proto.CompactTextString(m) }
+func (*FacetRequest) ProtoMessage() {}
+
+func (m *FacetRequest) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetRequest) GetType() FacetValue_ContentType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return FacetValue_ATOM
+}
+
+func (m *FacetRequest) GetParams() *FacetRequestParam {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+type FacetRefine struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Type *FacetValue_ContentType `protobuf:"varint,2,req,name=type,enum=search.FacetValue_ContentType" json:"type,omitempty"`
+ Value *string `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
+ Start *string `protobuf:"bytes,4,opt,name=start" json:"start,omitempty"`
+ End *string `protobuf:"bytes,5,opt,name=end" json:"end,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRefine) Reset() { *m = FacetRefine{} }
+func (m *FacetRefine) String() string { return proto.CompactTextString(m) }
+func (*FacetRefine) ProtoMessage() {}
+
+func (m *FacetRefine) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetRefine) GetType() FacetValue_ContentType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return FacetValue_ATOM
+}
+
+func (m *FacetRefine) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+func (m *FacetRefine) GetStart() string {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return ""
+}
+
+func (m *FacetRefine) GetEnd() string {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return ""
+}
+
+type SearchParams struct {
+ IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"`
+ Query *string `protobuf:"bytes,2,req,name=query" json:"query,omitempty"`
+ Cursor *string `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"`
+ Offset *int32 `protobuf:"varint,11,opt,name=offset" json:"offset,omitempty"`
+ CursorType *SearchParams_CursorType `protobuf:"varint,5,opt,name=cursor_type,enum=search.SearchParams_CursorType,def=0" json:"cursor_type,omitempty"`
+ Limit *int32 `protobuf:"varint,6,opt,name=limit,def=20" json:"limit,omitempty"`
+ MatchedCountAccuracy *int32 `protobuf:"varint,7,opt,name=matched_count_accuracy" json:"matched_count_accuracy,omitempty"`
+ SortSpec []*SortSpec `protobuf:"bytes,8,rep,name=sort_spec" json:"sort_spec,omitempty"`
+ ScorerSpec *ScorerSpec `protobuf:"bytes,9,opt,name=scorer_spec" json:"scorer_spec,omitempty"`
+ FieldSpec *FieldSpec `protobuf:"bytes,10,opt,name=field_spec" json:"field_spec,omitempty"`
+ KeysOnly *bool `protobuf:"varint,12,opt,name=keys_only" json:"keys_only,omitempty"`
+ ParsingMode *SearchParams_ParsingMode `protobuf:"varint,13,opt,name=parsing_mode,enum=search.SearchParams_ParsingMode,def=0" json:"parsing_mode,omitempty"`
+ AutoDiscoverFacetCount *int32 `protobuf:"varint,15,opt,name=auto_discover_facet_count,def=0" json:"auto_discover_facet_count,omitempty"`
+ IncludeFacet []*FacetRequest `protobuf:"bytes,16,rep,name=include_facet" json:"include_facet,omitempty"`
+ FacetRefine []*FacetRefine `protobuf:"bytes,17,rep,name=facet_refine" json:"facet_refine,omitempty"`
+ FacetAutoDetectParam *FacetAutoDetectParam `protobuf:"bytes,18,opt,name=facet_auto_detect_param" json:"facet_auto_detect_param,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchParams) Reset() { *m = SearchParams{} }
+func (m *SearchParams) String() string { return proto.CompactTextString(m) }
+func (*SearchParams) ProtoMessage() {}
+
+const Default_SearchParams_CursorType SearchParams_CursorType = SearchParams_NONE
+const Default_SearchParams_Limit int32 = 20
+const Default_SearchParams_ParsingMode SearchParams_ParsingMode = SearchParams_STRICT
+const Default_SearchParams_AutoDiscoverFacetCount int32 = 0
+
+func (m *SearchParams) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+func (m *SearchParams) GetQuery() string {
+ if m != nil && m.Query != nil {
+ return *m.Query
+ }
+ return ""
+}
+
+func (m *SearchParams) GetCursor() string {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return ""
+}
+
+func (m *SearchParams) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return 0
+}
+
+func (m *SearchParams) GetCursorType() SearchParams_CursorType {
+ if m != nil && m.CursorType != nil {
+ return *m.CursorType
+ }
+ return Default_SearchParams_CursorType
+}
+
+func (m *SearchParams) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return Default_SearchParams_Limit
+}
+
+func (m *SearchParams) GetMatchedCountAccuracy() int32 {
+ if m != nil && m.MatchedCountAccuracy != nil {
+ return *m.MatchedCountAccuracy
+ }
+ return 0
+}
+
+func (m *SearchParams) GetSortSpec() []*SortSpec {
+ if m != nil {
+ return m.SortSpec
+ }
+ return nil
+}
+
+func (m *SearchParams) GetScorerSpec() *ScorerSpec {
+ if m != nil {
+ return m.ScorerSpec
+ }
+ return nil
+}
+
+func (m *SearchParams) GetFieldSpec() *FieldSpec {
+ if m != nil {
+ return m.FieldSpec
+ }
+ return nil
+}
+
+func (m *SearchParams) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *SearchParams) GetParsingMode() SearchParams_ParsingMode {
+ if m != nil && m.ParsingMode != nil {
+ return *m.ParsingMode
+ }
+ return Default_SearchParams_ParsingMode
+}
+
+func (m *SearchParams) GetAutoDiscoverFacetCount() int32 {
+ if m != nil && m.AutoDiscoverFacetCount != nil {
+ return *m.AutoDiscoverFacetCount
+ }
+ return Default_SearchParams_AutoDiscoverFacetCount
+}
+
+func (m *SearchParams) GetIncludeFacet() []*FacetRequest {
+ if m != nil {
+ return m.IncludeFacet
+ }
+ return nil
+}
+
+func (m *SearchParams) GetFacetRefine() []*FacetRefine {
+ if m != nil {
+ return m.FacetRefine
+ }
+ return nil
+}
+
+func (m *SearchParams) GetFacetAutoDetectParam() *FacetAutoDetectParam {
+ if m != nil {
+ return m.FacetAutoDetectParam
+ }
+ return nil
+}
+
+type SearchRequest struct {
+ Params *SearchParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchRequest) Reset() { *m = SearchRequest{} }
+func (m *SearchRequest) String() string { return proto.CompactTextString(m) }
+func (*SearchRequest) ProtoMessage() {}
+
+func (m *SearchRequest) GetParams() *SearchParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *SearchRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type FacetResultValue struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Count *int32 `protobuf:"varint,2,req,name=count" json:"count,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetResultValue) Reset() { *m = FacetResultValue{} }
+func (m *FacetResultValue) String() string { return proto.CompactTextString(m) }
+func (*FacetResultValue) ProtoMessage() {}
+
+func (m *FacetResultValue) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetResultValue) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+type FacetResult struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Type *FacetValue_ContentType `protobuf:"varint,2,req,name=type,enum=search.FacetValue_ContentType" json:"type,omitempty"`
+ Value []*FacetResultValue `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetResult) Reset() { *m = FacetResult{} }
+func (m *FacetResult) String() string { return proto.CompactTextString(m) }
+func (*FacetResult) ProtoMessage() {}
+
+func (m *FacetResult) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetResult) GetType() FacetValue_ContentType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return FacetValue_ATOM
+}
+
+func (m *FacetResult) GetValue() []*FacetResultValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type SearchResult struct {
+ Document *Document `protobuf:"bytes,1,req,name=document" json:"document,omitempty"`
+ Expression []*Field `protobuf:"bytes,4,rep,name=expression" json:"expression,omitempty"`
+ Score []float64 `protobuf:"fixed64,2,rep,name=score" json:"score,omitempty"`
+ Cursor *string `protobuf:"bytes,3,opt,name=cursor" json:"cursor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchResult) Reset() { *m = SearchResult{} }
+func (m *SearchResult) String() string { return proto.CompactTextString(m) }
+func (*SearchResult) ProtoMessage() {}
+
+func (m *SearchResult) GetDocument() *Document {
+ if m != nil {
+ return m.Document
+ }
+ return nil
+}
+
+func (m *SearchResult) GetExpression() []*Field {
+ if m != nil {
+ return m.Expression
+ }
+ return nil
+}
+
+func (m *SearchResult) GetScore() []float64 {
+ if m != nil {
+ return m.Score
+ }
+ return nil
+}
+
+func (m *SearchResult) GetCursor() string {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return ""
+}
+
+type SearchResponse struct {
+ Result []*SearchResult `protobuf:"bytes,1,rep,name=result" json:"result,omitempty"`
+ MatchedCount *int64 `protobuf:"varint,2,req,name=matched_count" json:"matched_count,omitempty"`
+ Status *RequestStatus `protobuf:"bytes,3,req,name=status" json:"status,omitempty"`
+ Cursor *string `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"`
+ FacetResult []*FacetResult `protobuf:"bytes,5,rep,name=facet_result" json:"facet_result,omitempty"`
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchResponse) Reset() { *m = SearchResponse{} }
+func (m *SearchResponse) String() string { return proto.CompactTextString(m) }
+func (*SearchResponse) ProtoMessage() {}
+
+var extRange_SearchResponse = []proto.ExtensionRange{
+ {1000, 9999},
+}
+
+func (*SearchResponse) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_SearchResponse
+}
+func (m *SearchResponse) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+func (m *SearchResponse) GetResult() []*SearchResult {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+func (m *SearchResponse) GetMatchedCount() int64 {
+ if m != nil && m.MatchedCount != nil {
+ return *m.MatchedCount
+ }
+ return 0
+}
+
+func (m *SearchResponse) GetStatus() *RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *SearchResponse) GetCursor() string {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return ""
+}
+
+func (m *SearchResponse) GetFacetResult() []*FacetResult {
+ if m != nil {
+ return m.FacetResult
+ }
+ return nil
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/search/search.proto b/vendor/google.golang.org/appengine/internal/search/search.proto
new file mode 100644
index 000000000..629984137
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/search/search.proto
@@ -0,0 +1,376 @@
+syntax = "proto2";
+option go_package = "search";
+
+package search;
+
+message Scope {
+ enum Type {
+ USER_BY_CANONICAL_ID = 1;
+ USER_BY_EMAIL = 2;
+ GROUP_BY_CANONICAL_ID = 3;
+ GROUP_BY_EMAIL = 4;
+ GROUP_BY_DOMAIN = 5;
+ ALL_USERS = 6;
+ ALL_AUTHENTICATED_USERS = 7;
+ }
+
+ optional Type type = 1;
+ optional string value = 2;
+}
+
+message Entry {
+ enum Permission {
+ READ = 1;
+ WRITE = 2;
+ FULL_CONTROL = 3;
+ }
+
+ optional Scope scope = 1;
+ optional Permission permission = 2;
+ optional string display_name = 3;
+}
+
+message AccessControlList {
+ optional string owner = 1;
+ repeated Entry entries = 2;
+}
+
+message FieldValue {
+ enum ContentType {
+ TEXT = 0;
+ HTML = 1;
+ ATOM = 2;
+ DATE = 3;
+ NUMBER = 4;
+ GEO = 5;
+ }
+
+ optional ContentType type = 1 [default = TEXT];
+
+ optional string language = 2 [default = "en"];
+
+ optional string string_value = 3;
+
+ optional group Geo = 4 {
+ required double lat = 5;
+ required double lng = 6;
+ }
+}
+
+message Field {
+ required string name = 1;
+ required FieldValue value = 2;
+}
+
+message FieldTypes {
+ required string name = 1;
+ repeated FieldValue.ContentType type = 2;
+}
+
+message FacetValue {
+ enum ContentType {
+ ATOM = 2;
+ DATE = 3;
+ NUMBER = 4;
+ }
+
+ optional ContentType type = 1 [default = ATOM];
+ optional string string_value = 3;
+}
+
+message Facet {
+ required string name = 1;
+ required FacetValue value = 2;
+}
+
+
+message Document {
+ optional string id = 1;
+ optional string language = 2 [default = "en"];
+ repeated Field field = 3;
+ optional int32 order_id = 4;
+
+ enum Storage {
+ DISK = 0;
+ }
+
+ optional Storage storage = 5 [default = DISK];
+ optional AccessControlList acl = 6;
+ optional int64 version = 7;
+ repeated Facet facet = 8;
+}
+
+message SearchServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_REQUEST = 1;
+ TRANSIENT_ERROR = 2;
+ INTERNAL_ERROR = 3;
+ PERMISSION_DENIED = 4;
+ TIMEOUT = 5;
+ CONCURRENT_TRANSACTION = 6;
+ }
+}
+
+message RequestStatus {
+ required SearchServiceError.ErrorCode code = 1;
+ optional string error_detail = 2;
+}
+
+message IndexSpec {
+ required string name = 1;
+
+ enum Consistency {
+ GLOBAL = 0;
+ PER_DOCUMENT = 1;
+ }
+ optional Consistency consistency = 2 [default = PER_DOCUMENT];
+
+ optional string namespace = 3;
+ optional int32 version = 4;
+
+ enum Source {
+ SEARCH = 0;
+ DATASTORE = 1;
+ CLOUD_STORAGE = 2;
+ }
+ optional Source source = 5 [default = SEARCH];
+
+ enum Mode {
+ PRIORITY = 0;
+ BACKGROUND = 1;
+ }
+ optional Mode mode = 6 [default = PRIORITY];
+}
+
+message IndexMetadata {
+ required IndexSpec index_spec = 1;
+
+ repeated FieldTypes field = 2;
+
+ message Storage {
+ optional int64 amount_used = 1;
+ optional int64 limit = 2;
+ }
+ optional Storage storage = 3;
+}
+
+message IndexDocumentParams {
+ repeated Document document = 1;
+
+ enum Freshness {
+ SYNCHRONOUSLY = 0;
+ WHEN_CONVENIENT = 1;
+ }
+ optional Freshness freshness = 2 [default = SYNCHRONOUSLY, deprecated=true];
+
+ required IndexSpec index_spec = 3;
+}
+
+message IndexDocumentRequest {
+ required IndexDocumentParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message IndexDocumentResponse {
+ repeated RequestStatus status = 1;
+
+ repeated string doc_id = 2;
+}
+
+message DeleteDocumentParams {
+ repeated string doc_id = 1;
+
+ required IndexSpec index_spec = 2;
+}
+
+message DeleteDocumentRequest {
+ required DeleteDocumentParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message DeleteDocumentResponse {
+ repeated RequestStatus status = 1;
+}
+
+message ListDocumentsParams {
+ required IndexSpec index_spec = 1;
+ optional string start_doc_id = 2;
+ optional bool include_start_doc = 3 [default = true];
+ optional int32 limit = 4 [default = 100];
+ optional bool keys_only = 5;
+}
+
+message ListDocumentsRequest {
+ required ListDocumentsParams params = 1;
+
+ optional bytes app_id = 2;
+}
+
+message ListDocumentsResponse {
+ required RequestStatus status = 1;
+
+ repeated Document document = 2;
+}
+
+message ListIndexesParams {
+ optional bool fetch_schema = 1;
+ optional int32 limit = 2 [default = 20];
+ optional string namespace = 3;
+ optional string start_index_name = 4;
+ optional bool include_start_index = 5 [default = true];
+ optional string index_name_prefix = 6;
+ optional int32 offset = 7;
+ optional IndexSpec.Source source = 8 [default = SEARCH];
+}
+
+message ListIndexesRequest {
+ required ListIndexesParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message ListIndexesResponse {
+ required RequestStatus status = 1;
+ repeated IndexMetadata index_metadata = 2;
+}
+
+message DeleteSchemaParams {
+ optional IndexSpec.Source source = 1 [default = SEARCH];
+ repeated IndexSpec index_spec = 2;
+}
+
+message DeleteSchemaRequest {
+ required DeleteSchemaParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message DeleteSchemaResponse {
+ repeated RequestStatus status = 1;
+}
+
+message SortSpec {
+ required string sort_expression = 1;
+ optional bool sort_descending = 2 [default = true];
+ optional string default_value_text = 4;
+ optional double default_value_numeric = 5;
+}
+
+message ScorerSpec {
+ enum Scorer {
+ RESCORING_MATCH_SCORER = 0;
+ MATCH_SCORER = 2;
+ }
+ optional Scorer scorer = 1 [default = MATCH_SCORER];
+
+ optional int32 limit = 2 [default = 1000];
+ optional string match_scorer_parameters = 9;
+}
+
+message FieldSpec {
+ repeated string name = 1;
+
+ repeated group Expression = 2 {
+ required string name = 3;
+ required string expression = 4;
+ }
+}
+
+message FacetRange {
+ optional string name = 1;
+ optional string start = 2;
+ optional string end = 3;
+}
+
+message FacetRequestParam {
+ optional int32 value_limit = 1;
+ repeated FacetRange range = 2;
+ repeated string value_constraint = 3;
+}
+
+message FacetAutoDetectParam {
+ optional int32 value_limit = 1 [default = 10];
+}
+
+message FacetRequest {
+ required string name = 1;
+ required FacetValue.ContentType type = 2;
+ optional FacetRequestParam params = 3;
+}
+
+message FacetRefine {
+ required string name = 1;
+ required FacetValue.ContentType type = 2;
+ optional string value = 3;
+ optional string start = 4;
+ optional string end = 5;
+}
+
+message SearchParams {
+ required IndexSpec index_spec = 1;
+ required string query = 2;
+ optional string cursor = 4;
+ optional int32 offset = 11;
+
+ enum CursorType {
+ NONE = 0;
+ SINGLE = 1;
+ PER_RESULT = 2;
+ }
+ optional CursorType cursor_type = 5 [default = NONE];
+
+ optional int32 limit = 6 [default = 20];
+ optional int32 matched_count_accuracy = 7;
+ repeated SortSpec sort_spec = 8;
+ optional ScorerSpec scorer_spec = 9;
+ optional FieldSpec field_spec = 10;
+ optional bool keys_only = 12;
+
+ enum ParsingMode {
+ STRICT = 0;
+ RELAXED = 1;
+ }
+ optional ParsingMode parsing_mode = 13 [default = STRICT];
+
+ optional int32 auto_discover_facet_count = 15 [default = 0];
+ repeated FacetRequest include_facet = 16;
+ repeated FacetRefine facet_refine = 17;
+ optional FacetAutoDetectParam facet_auto_detect_param = 18;
+}
+
+message SearchRequest {
+ required SearchParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message FacetResultValue {
+ required string name = 1;
+ required int32 count = 2;
+}
+
+message FacetResult {
+ required string name = 1;
+ required FacetValue.ContentType type = 2;
+ repeated FacetResultValue value = 3;
+}
+
+message SearchResult {
+ required Document document = 1;
+ repeated Field expression = 4;
+ repeated double score = 2;
+ optional string cursor = 3;
+}
+
+message SearchResponse {
+ repeated SearchResult result = 1;
+ required int64 matched_count = 2;
+ required RequestStatus status = 3;
+ optional string cursor = 4;
+ repeated FacetResult facet_result = 5;
+
+ extensions 1000 to 9999;
+}
diff --git a/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go
new file mode 100644
index 000000000..9180ba625
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go
@@ -0,0 +1,1886 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
+// DO NOT EDIT!
+
+/*
+Package taskqueue is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
+
+It has these top-level messages:
+ TaskQueueServiceError
+ TaskPayload
+ TaskQueueRetryParameters
+ TaskQueueAcl
+ TaskQueueHttpHeader
+ TaskQueueMode
+ TaskQueueAddRequest
+ TaskQueueAddResponse
+ TaskQueueBulkAddRequest
+ TaskQueueBulkAddResponse
+ TaskQueueDeleteRequest
+ TaskQueueDeleteResponse
+ TaskQueueForceRunRequest
+ TaskQueueForceRunResponse
+ TaskQueueUpdateQueueRequest
+ TaskQueueUpdateQueueResponse
+ TaskQueueFetchQueuesRequest
+ TaskQueueFetchQueuesResponse
+ TaskQueueFetchQueueStatsRequest
+ TaskQueueScannerQueueInfo
+ TaskQueueFetchQueueStatsResponse
+ TaskQueuePauseQueueRequest
+ TaskQueuePauseQueueResponse
+ TaskQueuePurgeQueueRequest
+ TaskQueuePurgeQueueResponse
+ TaskQueueDeleteQueueRequest
+ TaskQueueDeleteQueueResponse
+ TaskQueueDeleteGroupRequest
+ TaskQueueDeleteGroupResponse
+ TaskQueueQueryTasksRequest
+ TaskQueueQueryTasksResponse
+ TaskQueueFetchTaskRequest
+ TaskQueueFetchTaskResponse
+ TaskQueueUpdateStorageLimitRequest
+ TaskQueueUpdateStorageLimitResponse
+ TaskQueueQueryAndOwnTasksRequest
+ TaskQueueQueryAndOwnTasksResponse
+ TaskQueueModifyTaskLeaseRequest
+ TaskQueueModifyTaskLeaseResponse
+*/
+package taskqueue
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+import appengine "google.golang.org/appengine/internal/datastore"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type TaskQueueServiceError_ErrorCode int32
+
+const (
+ TaskQueueServiceError_OK TaskQueueServiceError_ErrorCode = 0
+ TaskQueueServiceError_UNKNOWN_QUEUE TaskQueueServiceError_ErrorCode = 1
+ TaskQueueServiceError_TRANSIENT_ERROR TaskQueueServiceError_ErrorCode = 2
+ TaskQueueServiceError_INTERNAL_ERROR TaskQueueServiceError_ErrorCode = 3
+ TaskQueueServiceError_TASK_TOO_LARGE TaskQueueServiceError_ErrorCode = 4
+ TaskQueueServiceError_INVALID_TASK_NAME TaskQueueServiceError_ErrorCode = 5
+ TaskQueueServiceError_INVALID_QUEUE_NAME TaskQueueServiceError_ErrorCode = 6
+ TaskQueueServiceError_INVALID_URL TaskQueueServiceError_ErrorCode = 7
+ TaskQueueServiceError_INVALID_QUEUE_RATE TaskQueueServiceError_ErrorCode = 8
+ TaskQueueServiceError_PERMISSION_DENIED TaskQueueServiceError_ErrorCode = 9
+ TaskQueueServiceError_TASK_ALREADY_EXISTS TaskQueueServiceError_ErrorCode = 10
+ TaskQueueServiceError_TOMBSTONED_TASK TaskQueueServiceError_ErrorCode = 11
+ TaskQueueServiceError_INVALID_ETA TaskQueueServiceError_ErrorCode = 12
+ TaskQueueServiceError_INVALID_REQUEST TaskQueueServiceError_ErrorCode = 13
+ TaskQueueServiceError_UNKNOWN_TASK TaskQueueServiceError_ErrorCode = 14
+ TaskQueueServiceError_TOMBSTONED_QUEUE TaskQueueServiceError_ErrorCode = 15
+ TaskQueueServiceError_DUPLICATE_TASK_NAME TaskQueueServiceError_ErrorCode = 16
+ TaskQueueServiceError_SKIPPED TaskQueueServiceError_ErrorCode = 17
+ TaskQueueServiceError_TOO_MANY_TASKS TaskQueueServiceError_ErrorCode = 18
+ TaskQueueServiceError_INVALID_PAYLOAD TaskQueueServiceError_ErrorCode = 19
+ TaskQueueServiceError_INVALID_RETRY_PARAMETERS TaskQueueServiceError_ErrorCode = 20
+ TaskQueueServiceError_INVALID_QUEUE_MODE TaskQueueServiceError_ErrorCode = 21
+ TaskQueueServiceError_ACL_LOOKUP_ERROR TaskQueueServiceError_ErrorCode = 22
+ TaskQueueServiceError_TRANSACTIONAL_REQUEST_TOO_LARGE TaskQueueServiceError_ErrorCode = 23
+ TaskQueueServiceError_INCORRECT_CREATOR_NAME TaskQueueServiceError_ErrorCode = 24
+ TaskQueueServiceError_TASK_LEASE_EXPIRED TaskQueueServiceError_ErrorCode = 25
+ TaskQueueServiceError_QUEUE_PAUSED TaskQueueServiceError_ErrorCode = 26
+ TaskQueueServiceError_INVALID_TAG TaskQueueServiceError_ErrorCode = 27
+ // Reserved range for the Datastore error codes.
+ // Original Datastore error code is shifted by DATASTORE_ERROR offset.
+ TaskQueueServiceError_DATASTORE_ERROR TaskQueueServiceError_ErrorCode = 10000
+)
+
+var TaskQueueServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "UNKNOWN_QUEUE",
+ 2: "TRANSIENT_ERROR",
+ 3: "INTERNAL_ERROR",
+ 4: "TASK_TOO_LARGE",
+ 5: "INVALID_TASK_NAME",
+ 6: "INVALID_QUEUE_NAME",
+ 7: "INVALID_URL",
+ 8: "INVALID_QUEUE_RATE",
+ 9: "PERMISSION_DENIED",
+ 10: "TASK_ALREADY_EXISTS",
+ 11: "TOMBSTONED_TASK",
+ 12: "INVALID_ETA",
+ 13: "INVALID_REQUEST",
+ 14: "UNKNOWN_TASK",
+ 15: "TOMBSTONED_QUEUE",
+ 16: "DUPLICATE_TASK_NAME",
+ 17: "SKIPPED",
+ 18: "TOO_MANY_TASKS",
+ 19: "INVALID_PAYLOAD",
+ 20: "INVALID_RETRY_PARAMETERS",
+ 21: "INVALID_QUEUE_MODE",
+ 22: "ACL_LOOKUP_ERROR",
+ 23: "TRANSACTIONAL_REQUEST_TOO_LARGE",
+ 24: "INCORRECT_CREATOR_NAME",
+ 25: "TASK_LEASE_EXPIRED",
+ 26: "QUEUE_PAUSED",
+ 27: "INVALID_TAG",
+ 10000: "DATASTORE_ERROR",
+}
+var TaskQueueServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "UNKNOWN_QUEUE": 1,
+ "TRANSIENT_ERROR": 2,
+ "INTERNAL_ERROR": 3,
+ "TASK_TOO_LARGE": 4,
+ "INVALID_TASK_NAME": 5,
+ "INVALID_QUEUE_NAME": 6,
+ "INVALID_URL": 7,
+ "INVALID_QUEUE_RATE": 8,
+ "PERMISSION_DENIED": 9,
+ "TASK_ALREADY_EXISTS": 10,
+ "TOMBSTONED_TASK": 11,
+ "INVALID_ETA": 12,
+ "INVALID_REQUEST": 13,
+ "UNKNOWN_TASK": 14,
+ "TOMBSTONED_QUEUE": 15,
+ "DUPLICATE_TASK_NAME": 16,
+ "SKIPPED": 17,
+ "TOO_MANY_TASKS": 18,
+ "INVALID_PAYLOAD": 19,
+ "INVALID_RETRY_PARAMETERS": 20,
+ "INVALID_QUEUE_MODE": 21,
+ "ACL_LOOKUP_ERROR": 22,
+ "TRANSACTIONAL_REQUEST_TOO_LARGE": 23,
+ "INCORRECT_CREATOR_NAME": 24,
+ "TASK_LEASE_EXPIRED": 25,
+ "QUEUE_PAUSED": 26,
+ "INVALID_TAG": 27,
+ "DATASTORE_ERROR": 10000,
+}
+
+func (x TaskQueueServiceError_ErrorCode) Enum() *TaskQueueServiceError_ErrorCode {
+ p := new(TaskQueueServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x TaskQueueServiceError_ErrorCode) String() string {
+ return proto.EnumName(TaskQueueServiceError_ErrorCode_name, int32(x))
+}
+func (x *TaskQueueServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(TaskQueueServiceError_ErrorCode_value, data, "TaskQueueServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = TaskQueueServiceError_ErrorCode(value)
+ return nil
+}
+
+type TaskQueueMode_Mode int32
+
+const (
+ TaskQueueMode_PUSH TaskQueueMode_Mode = 0
+ TaskQueueMode_PULL TaskQueueMode_Mode = 1
+)
+
+var TaskQueueMode_Mode_name = map[int32]string{
+ 0: "PUSH",
+ 1: "PULL",
+}
+var TaskQueueMode_Mode_value = map[string]int32{
+ "PUSH": 0,
+ "PULL": 1,
+}
+
+func (x TaskQueueMode_Mode) Enum() *TaskQueueMode_Mode {
+ p := new(TaskQueueMode_Mode)
+ *p = x
+ return p
+}
+func (x TaskQueueMode_Mode) String() string {
+ return proto.EnumName(TaskQueueMode_Mode_name, int32(x))
+}
+func (x *TaskQueueMode_Mode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(TaskQueueMode_Mode_value, data, "TaskQueueMode_Mode")
+ if err != nil {
+ return err
+ }
+ *x = TaskQueueMode_Mode(value)
+ return nil
+}
+
+type TaskQueueAddRequest_RequestMethod int32
+
+const (
+ TaskQueueAddRequest_GET TaskQueueAddRequest_RequestMethod = 1
+ TaskQueueAddRequest_POST TaskQueueAddRequest_RequestMethod = 2
+ TaskQueueAddRequest_HEAD TaskQueueAddRequest_RequestMethod = 3
+ TaskQueueAddRequest_PUT TaskQueueAddRequest_RequestMethod = 4
+ TaskQueueAddRequest_DELETE TaskQueueAddRequest_RequestMethod = 5
+)
+
+var TaskQueueAddRequest_RequestMethod_name = map[int32]string{
+ 1: "GET",
+ 2: "POST",
+ 3: "HEAD",
+ 4: "PUT",
+ 5: "DELETE",
+}
+var TaskQueueAddRequest_RequestMethod_value = map[string]int32{
+ "GET": 1,
+ "POST": 2,
+ "HEAD": 3,
+ "PUT": 4,
+ "DELETE": 5,
+}
+
+func (x TaskQueueAddRequest_RequestMethod) Enum() *TaskQueueAddRequest_RequestMethod {
+ p := new(TaskQueueAddRequest_RequestMethod)
+ *p = x
+ return p
+}
+func (x TaskQueueAddRequest_RequestMethod) String() string {
+ return proto.EnumName(TaskQueueAddRequest_RequestMethod_name, int32(x))
+}
+func (x *TaskQueueAddRequest_RequestMethod) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(TaskQueueAddRequest_RequestMethod_value, data, "TaskQueueAddRequest_RequestMethod")
+ if err != nil {
+ return err
+ }
+ *x = TaskQueueAddRequest_RequestMethod(value)
+ return nil
+}
+
+type TaskQueueQueryTasksResponse_Task_RequestMethod int32
+
+const (
+ TaskQueueQueryTasksResponse_Task_GET TaskQueueQueryTasksResponse_Task_RequestMethod = 1
+ TaskQueueQueryTasksResponse_Task_POST TaskQueueQueryTasksResponse_Task_RequestMethod = 2
+ TaskQueueQueryTasksResponse_Task_HEAD TaskQueueQueryTasksResponse_Task_RequestMethod = 3
+ TaskQueueQueryTasksResponse_Task_PUT TaskQueueQueryTasksResponse_Task_RequestMethod = 4
+ TaskQueueQueryTasksResponse_Task_DELETE TaskQueueQueryTasksResponse_Task_RequestMethod = 5
+)
+
+var TaskQueueQueryTasksResponse_Task_RequestMethod_name = map[int32]string{
+ 1: "GET",
+ 2: "POST",
+ 3: "HEAD",
+ 4: "PUT",
+ 5: "DELETE",
+}
+var TaskQueueQueryTasksResponse_Task_RequestMethod_value = map[string]int32{
+ "GET": 1,
+ "POST": 2,
+ "HEAD": 3,
+ "PUT": 4,
+ "DELETE": 5,
+}
+
+func (x TaskQueueQueryTasksResponse_Task_RequestMethod) Enum() *TaskQueueQueryTasksResponse_Task_RequestMethod {
+ p := new(TaskQueueQueryTasksResponse_Task_RequestMethod)
+ *p = x
+ return p
+}
+func (x TaskQueueQueryTasksResponse_Task_RequestMethod) String() string {
+ return proto.EnumName(TaskQueueQueryTasksResponse_Task_RequestMethod_name, int32(x))
+}
+func (x *TaskQueueQueryTasksResponse_Task_RequestMethod) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(TaskQueueQueryTasksResponse_Task_RequestMethod_value, data, "TaskQueueQueryTasksResponse_Task_RequestMethod")
+ if err != nil {
+ return err
+ }
+ *x = TaskQueueQueryTasksResponse_Task_RequestMethod(value)
+ return nil
+}
+
+type TaskQueueServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueServiceError) Reset() { *m = TaskQueueServiceError{} }
+func (m *TaskQueueServiceError) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueServiceError) ProtoMessage() {}
+
+type TaskPayload struct {
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskPayload) Reset() { *m = TaskPayload{} }
+func (m *TaskPayload) String() string { return proto.CompactTextString(m) }
+func (*TaskPayload) ProtoMessage() {}
+
+func (m *TaskPayload) Marshal() ([]byte, error) {
+ return proto.MarshalMessageSet(m.ExtensionMap())
+}
+func (m *TaskPayload) Unmarshal(buf []byte) error {
+ return proto.UnmarshalMessageSet(buf, m.ExtensionMap())
+}
+func (m *TaskPayload) MarshalJSON() ([]byte, error) {
+ return proto.MarshalMessageSetJSON(m.XXX_extensions)
+}
+func (m *TaskPayload) UnmarshalJSON(buf []byte) error {
+ return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions)
+}
+
+// ensure TaskPayload satisfies proto.Marshaler and proto.Unmarshaler
+var _ proto.Marshaler = (*TaskPayload)(nil)
+var _ proto.Unmarshaler = (*TaskPayload)(nil)
+
+var extRange_TaskPayload = []proto.ExtensionRange{
+ {10, 2147483646},
+}
+
+func (*TaskPayload) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_TaskPayload
+}
+func (m *TaskPayload) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+type TaskQueueRetryParameters struct {
+ RetryLimit *int32 `protobuf:"varint,1,opt,name=retry_limit" json:"retry_limit,omitempty"`
+ AgeLimitSec *int64 `protobuf:"varint,2,opt,name=age_limit_sec" json:"age_limit_sec,omitempty"`
+ MinBackoffSec *float64 `protobuf:"fixed64,3,opt,name=min_backoff_sec,def=0.1" json:"min_backoff_sec,omitempty"`
+ MaxBackoffSec *float64 `protobuf:"fixed64,4,opt,name=max_backoff_sec,def=3600" json:"max_backoff_sec,omitempty"`
+ MaxDoublings *int32 `protobuf:"varint,5,opt,name=max_doublings,def=16" json:"max_doublings,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueRetryParameters) Reset() { *m = TaskQueueRetryParameters{} }
+func (m *TaskQueueRetryParameters) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueRetryParameters) ProtoMessage() {}
+
+const Default_TaskQueueRetryParameters_MinBackoffSec float64 = 0.1
+const Default_TaskQueueRetryParameters_MaxBackoffSec float64 = 3600
+const Default_TaskQueueRetryParameters_MaxDoublings int32 = 16
+
+func (m *TaskQueueRetryParameters) GetRetryLimit() int32 {
+ if m != nil && m.RetryLimit != nil {
+ return *m.RetryLimit
+ }
+ return 0
+}
+
+func (m *TaskQueueRetryParameters) GetAgeLimitSec() int64 {
+ if m != nil && m.AgeLimitSec != nil {
+ return *m.AgeLimitSec
+ }
+ return 0
+}
+
+func (m *TaskQueueRetryParameters) GetMinBackoffSec() float64 {
+ if m != nil && m.MinBackoffSec != nil {
+ return *m.MinBackoffSec
+ }
+ return Default_TaskQueueRetryParameters_MinBackoffSec
+}
+
+func (m *TaskQueueRetryParameters) GetMaxBackoffSec() float64 {
+ if m != nil && m.MaxBackoffSec != nil {
+ return *m.MaxBackoffSec
+ }
+ return Default_TaskQueueRetryParameters_MaxBackoffSec
+}
+
+func (m *TaskQueueRetryParameters) GetMaxDoublings() int32 {
+ if m != nil && m.MaxDoublings != nil {
+ return *m.MaxDoublings
+ }
+ return Default_TaskQueueRetryParameters_MaxDoublings
+}
+
+type TaskQueueAcl struct {
+ UserEmail [][]byte `protobuf:"bytes,1,rep,name=user_email" json:"user_email,omitempty"`
+ WriterEmail [][]byte `protobuf:"bytes,2,rep,name=writer_email" json:"writer_email,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAcl) Reset() { *m = TaskQueueAcl{} }
+func (m *TaskQueueAcl) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAcl) ProtoMessage() {}
+
+func (m *TaskQueueAcl) GetUserEmail() [][]byte {
+ if m != nil {
+ return m.UserEmail
+ }
+ return nil
+}
+
+func (m *TaskQueueAcl) GetWriterEmail() [][]byte {
+ if m != nil {
+ return m.WriterEmail
+ }
+ return nil
+}
+
+type TaskQueueHttpHeader struct {
+ Key []byte `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueHttpHeader) Reset() { *m = TaskQueueHttpHeader{} }
+func (m *TaskQueueHttpHeader) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueHttpHeader) ProtoMessage() {}
+
+func (m *TaskQueueHttpHeader) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TaskQueueHttpHeader) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type TaskQueueMode struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueMode) Reset() { *m = TaskQueueMode{} }
+func (m *TaskQueueMode) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueMode) ProtoMessage() {}
+
+type TaskQueueAddRequest struct {
+ QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
+ EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
+ Method *TaskQueueAddRequest_RequestMethod `protobuf:"varint,5,opt,name=method,enum=appengine.TaskQueueAddRequest_RequestMethod,def=2" json:"method,omitempty"`
+ Url []byte `protobuf:"bytes,4,opt,name=url" json:"url,omitempty"`
+ Header []*TaskQueueAddRequest_Header `protobuf:"group,6,rep" json:"header,omitempty"`
+ Body []byte `protobuf:"bytes,9,opt,name=body" json:"body,omitempty"`
+ Transaction *appengine.Transaction `protobuf:"bytes,10,opt,name=transaction" json:"transaction,omitempty"`
+ AppId []byte `protobuf:"bytes,11,opt,name=app_id" json:"app_id,omitempty"`
+ Crontimetable *TaskQueueAddRequest_CronTimetable `protobuf:"group,12,opt,name=CronTimetable" json:"crontimetable,omitempty"`
+ Description []byte `protobuf:"bytes,15,opt,name=description" json:"description,omitempty"`
+ Payload *TaskPayload `protobuf:"bytes,16,opt,name=payload" json:"payload,omitempty"`
+ RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,17,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
+ Mode *TaskQueueMode_Mode `protobuf:"varint,18,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"`
+ Tag []byte `protobuf:"bytes,19,opt,name=tag" json:"tag,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAddRequest) Reset() { *m = TaskQueueAddRequest{} }
+func (m *TaskQueueAddRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAddRequest) ProtoMessage() {}
+
+const Default_TaskQueueAddRequest_Method TaskQueueAddRequest_RequestMethod = TaskQueueAddRequest_POST
+const Default_TaskQueueAddRequest_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH
+
+func (m *TaskQueueAddRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetEtaUsec() int64 {
+ if m != nil && m.EtaUsec != nil {
+ return *m.EtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueAddRequest) GetMethod() TaskQueueAddRequest_RequestMethod {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return Default_TaskQueueAddRequest_Method
+}
+
+func (m *TaskQueueAddRequest) GetUrl() []byte {
+ if m != nil {
+ return m.Url
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetHeader() []*TaskQueueAddRequest_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetBody() []byte {
+ if m != nil {
+ return m.Body
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetTransaction() *appengine.Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetCrontimetable() *TaskQueueAddRequest_CronTimetable {
+ if m != nil {
+ return m.Crontimetable
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetDescription() []byte {
+ if m != nil {
+ return m.Description
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetPayload() *TaskPayload {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetRetryParameters() *TaskQueueRetryParameters {
+ if m != nil {
+ return m.RetryParameters
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetMode() TaskQueueMode_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_TaskQueueAddRequest_Mode
+}
+
+func (m *TaskQueueAddRequest) GetTag() []byte {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
+
+type TaskQueueAddRequest_Header struct {
+ Key []byte `protobuf:"bytes,7,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,8,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAddRequest_Header) Reset() { *m = TaskQueueAddRequest_Header{} }
+func (m *TaskQueueAddRequest_Header) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAddRequest_Header) ProtoMessage() {}
+
+func (m *TaskQueueAddRequest_Header) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest_Header) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type TaskQueueAddRequest_CronTimetable struct {
+ Schedule []byte `protobuf:"bytes,13,req,name=schedule" json:"schedule,omitempty"`
+ Timezone []byte `protobuf:"bytes,14,req,name=timezone" json:"timezone,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAddRequest_CronTimetable) Reset() { *m = TaskQueueAddRequest_CronTimetable{} }
+func (m *TaskQueueAddRequest_CronTimetable) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAddRequest_CronTimetable) ProtoMessage() {}
+
+func (m *TaskQueueAddRequest_CronTimetable) GetSchedule() []byte {
+ if m != nil {
+ return m.Schedule
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest_CronTimetable) GetTimezone() []byte {
+ if m != nil {
+ return m.Timezone
+ }
+ return nil
+}
+
+type TaskQueueAddResponse struct {
+ ChosenTaskName []byte `protobuf:"bytes,1,opt,name=chosen_task_name" json:"chosen_task_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAddResponse) Reset() { *m = TaskQueueAddResponse{} }
+func (m *TaskQueueAddResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAddResponse) ProtoMessage() {}
+
+func (m *TaskQueueAddResponse) GetChosenTaskName() []byte {
+ if m != nil {
+ return m.ChosenTaskName
+ }
+ return nil
+}
+
+type TaskQueueBulkAddRequest struct {
+ AddRequest []*TaskQueueAddRequest `protobuf:"bytes,1,rep,name=add_request" json:"add_request,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueBulkAddRequest) Reset() { *m = TaskQueueBulkAddRequest{} }
+func (m *TaskQueueBulkAddRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueBulkAddRequest) ProtoMessage() {}
+
+func (m *TaskQueueBulkAddRequest) GetAddRequest() []*TaskQueueAddRequest {
+ if m != nil {
+ return m.AddRequest
+ }
+ return nil
+}
+
+type TaskQueueBulkAddResponse struct {
+ Taskresult []*TaskQueueBulkAddResponse_TaskResult `protobuf:"group,1,rep,name=TaskResult" json:"taskresult,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueBulkAddResponse) Reset() { *m = TaskQueueBulkAddResponse{} }
+func (m *TaskQueueBulkAddResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueBulkAddResponse) ProtoMessage() {}
+
+func (m *TaskQueueBulkAddResponse) GetTaskresult() []*TaskQueueBulkAddResponse_TaskResult {
+ if m != nil {
+ return m.Taskresult
+ }
+ return nil
+}
+
+type TaskQueueBulkAddResponse_TaskResult struct {
+ Result *TaskQueueServiceError_ErrorCode `protobuf:"varint,2,req,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"`
+ ChosenTaskName []byte `protobuf:"bytes,3,opt,name=chosen_task_name" json:"chosen_task_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueBulkAddResponse_TaskResult) Reset() { *m = TaskQueueBulkAddResponse_TaskResult{} }
+func (m *TaskQueueBulkAddResponse_TaskResult) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueBulkAddResponse_TaskResult) ProtoMessage() {}
+
+func (m *TaskQueueBulkAddResponse_TaskResult) GetResult() TaskQueueServiceError_ErrorCode {
+ if m != nil && m.Result != nil {
+ return *m.Result
+ }
+ return TaskQueueServiceError_OK
+}
+
+func (m *TaskQueueBulkAddResponse_TaskResult) GetChosenTaskName() []byte {
+ if m != nil {
+ return m.ChosenTaskName
+ }
+ return nil
+}
+
+type TaskQueueDeleteRequest struct {
+ QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName [][]byte `protobuf:"bytes,2,rep,name=task_name" json:"task_name,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteRequest) Reset() { *m = TaskQueueDeleteRequest{} }
+func (m *TaskQueueDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteRequest) ProtoMessage() {}
+
+func (m *TaskQueueDeleteRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueDeleteRequest) GetTaskName() [][]byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueDeleteRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type TaskQueueDeleteResponse struct {
+ Result []TaskQueueServiceError_ErrorCode `protobuf:"varint,3,rep,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteResponse) Reset() { *m = TaskQueueDeleteResponse{} }
+func (m *TaskQueueDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteResponse) ProtoMessage() {}
+
+func (m *TaskQueueDeleteResponse) GetResult() []TaskQueueServiceError_ErrorCode {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+type TaskQueueForceRunRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName []byte `protobuf:"bytes,3,req,name=task_name" json:"task_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueForceRunRequest) Reset() { *m = TaskQueueForceRunRequest{} }
+func (m *TaskQueueForceRunRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueForceRunRequest) ProtoMessage() {}
+
+func (m *TaskQueueForceRunRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueForceRunRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueForceRunRequest) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+type TaskQueueForceRunResponse struct {
+ Result *TaskQueueServiceError_ErrorCode `protobuf:"varint,3,req,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueForceRunResponse) Reset() { *m = TaskQueueForceRunResponse{} }
+func (m *TaskQueueForceRunResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueForceRunResponse) ProtoMessage() {}
+
+func (m *TaskQueueForceRunResponse) GetResult() TaskQueueServiceError_ErrorCode {
+ if m != nil && m.Result != nil {
+ return *m.Result
+ }
+ return TaskQueueServiceError_OK
+}
+
+type TaskQueueUpdateQueueRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ BucketRefillPerSecond *float64 `protobuf:"fixed64,3,req,name=bucket_refill_per_second" json:"bucket_refill_per_second,omitempty"`
+ BucketCapacity *int32 `protobuf:"varint,4,req,name=bucket_capacity" json:"bucket_capacity,omitempty"`
+ UserSpecifiedRate *string `protobuf:"bytes,5,opt,name=user_specified_rate" json:"user_specified_rate,omitempty"`
+ RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,6,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
+ MaxConcurrentRequests *int32 `protobuf:"varint,7,opt,name=max_concurrent_requests" json:"max_concurrent_requests,omitempty"`
+ Mode *TaskQueueMode_Mode `protobuf:"varint,8,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"`
+ Acl *TaskQueueAcl `protobuf:"bytes,9,opt,name=acl" json:"acl,omitempty"`
+ HeaderOverride []*TaskQueueHttpHeader `protobuf:"bytes,10,rep,name=header_override" json:"header_override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueUpdateQueueRequest) Reset() { *m = TaskQueueUpdateQueueRequest{} }
+func (m *TaskQueueUpdateQueueRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueUpdateQueueRequest) ProtoMessage() {}
+
+const Default_TaskQueueUpdateQueueRequest_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH
+
+func (m *TaskQueueUpdateQueueRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetBucketRefillPerSecond() float64 {
+ if m != nil && m.BucketRefillPerSecond != nil {
+ return *m.BucketRefillPerSecond
+ }
+ return 0
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetBucketCapacity() int32 {
+ if m != nil && m.BucketCapacity != nil {
+ return *m.BucketCapacity
+ }
+ return 0
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetUserSpecifiedRate() string {
+ if m != nil && m.UserSpecifiedRate != nil {
+ return *m.UserSpecifiedRate
+ }
+ return ""
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetRetryParameters() *TaskQueueRetryParameters {
+ if m != nil {
+ return m.RetryParameters
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetMaxConcurrentRequests() int32 {
+ if m != nil && m.MaxConcurrentRequests != nil {
+ return *m.MaxConcurrentRequests
+ }
+ return 0
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetMode() TaskQueueMode_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_TaskQueueUpdateQueueRequest_Mode
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetAcl() *TaskQueueAcl {
+ if m != nil {
+ return m.Acl
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetHeaderOverride() []*TaskQueueHttpHeader {
+ if m != nil {
+ return m.HeaderOverride
+ }
+ return nil
+}
+
+type TaskQueueUpdateQueueResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueUpdateQueueResponse) Reset() { *m = TaskQueueUpdateQueueResponse{} }
+func (m *TaskQueueUpdateQueueResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueUpdateQueueResponse) ProtoMessage() {}
+
+type TaskQueueFetchQueuesRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ MaxRows *int32 `protobuf:"varint,2,req,name=max_rows" json:"max_rows,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueuesRequest) Reset() { *m = TaskQueueFetchQueuesRequest{} }
+func (m *TaskQueueFetchQueuesRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueuesRequest) ProtoMessage() {}
+
+func (m *TaskQueueFetchQueuesRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesRequest) GetMaxRows() int32 {
+ if m != nil && m.MaxRows != nil {
+ return *m.MaxRows
+ }
+ return 0
+}
+
+type TaskQueueFetchQueuesResponse struct {
+ Queue []*TaskQueueFetchQueuesResponse_Queue `protobuf:"group,1,rep" json:"queue,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueuesResponse) Reset() { *m = TaskQueueFetchQueuesResponse{} }
+func (m *TaskQueueFetchQueuesResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueuesResponse) ProtoMessage() {}
+
+func (m *TaskQueueFetchQueuesResponse) GetQueue() []*TaskQueueFetchQueuesResponse_Queue {
+ if m != nil {
+ return m.Queue
+ }
+ return nil
+}
+
+type TaskQueueFetchQueuesResponse_Queue struct {
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ BucketRefillPerSecond *float64 `protobuf:"fixed64,3,req,name=bucket_refill_per_second" json:"bucket_refill_per_second,omitempty"`
+ BucketCapacity *float64 `protobuf:"fixed64,4,req,name=bucket_capacity" json:"bucket_capacity,omitempty"`
+ UserSpecifiedRate *string `protobuf:"bytes,5,opt,name=user_specified_rate" json:"user_specified_rate,omitempty"`
+ Paused *bool `protobuf:"varint,6,req,name=paused,def=0" json:"paused,omitempty"`
+ RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,7,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
+ MaxConcurrentRequests *int32 `protobuf:"varint,8,opt,name=max_concurrent_requests" json:"max_concurrent_requests,omitempty"`
+ Mode *TaskQueueMode_Mode `protobuf:"varint,9,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"`
+ Acl *TaskQueueAcl `protobuf:"bytes,10,opt,name=acl" json:"acl,omitempty"`
+ HeaderOverride []*TaskQueueHttpHeader `protobuf:"bytes,11,rep,name=header_override" json:"header_override,omitempty"`
+ CreatorName *string `protobuf:"bytes,12,opt,name=creator_name,def=apphosting" json:"creator_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) Reset() { *m = TaskQueueFetchQueuesResponse_Queue{} }
+func (m *TaskQueueFetchQueuesResponse_Queue) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueuesResponse_Queue) ProtoMessage() {}
+
+const Default_TaskQueueFetchQueuesResponse_Queue_Paused bool = false
+const Default_TaskQueueFetchQueuesResponse_Queue_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH
+const Default_TaskQueueFetchQueuesResponse_Queue_CreatorName string = "apphosting"
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetBucketRefillPerSecond() float64 {
+ if m != nil && m.BucketRefillPerSecond != nil {
+ return *m.BucketRefillPerSecond
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetBucketCapacity() float64 {
+ if m != nil && m.BucketCapacity != nil {
+ return *m.BucketCapacity
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetUserSpecifiedRate() string {
+ if m != nil && m.UserSpecifiedRate != nil {
+ return *m.UserSpecifiedRate
+ }
+ return ""
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetPaused() bool {
+ if m != nil && m.Paused != nil {
+ return *m.Paused
+ }
+ return Default_TaskQueueFetchQueuesResponse_Queue_Paused
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetRetryParameters() *TaskQueueRetryParameters {
+ if m != nil {
+ return m.RetryParameters
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetMaxConcurrentRequests() int32 {
+ if m != nil && m.MaxConcurrentRequests != nil {
+ return *m.MaxConcurrentRequests
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetMode() TaskQueueMode_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_TaskQueueFetchQueuesResponse_Queue_Mode
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetAcl() *TaskQueueAcl {
+ if m != nil {
+ return m.Acl
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetHeaderOverride() []*TaskQueueHttpHeader {
+ if m != nil {
+ return m.HeaderOverride
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetCreatorName() string {
+ if m != nil && m.CreatorName != nil {
+ return *m.CreatorName
+ }
+ return Default_TaskQueueFetchQueuesResponse_Queue_CreatorName
+}
+
+type TaskQueueFetchQueueStatsRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName [][]byte `protobuf:"bytes,2,rep,name=queue_name" json:"queue_name,omitempty"`
+ MaxNumTasks *int32 `protobuf:"varint,3,opt,name=max_num_tasks,def=0" json:"max_num_tasks,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueueStatsRequest) Reset() { *m = TaskQueueFetchQueueStatsRequest{} }
+func (m *TaskQueueFetchQueueStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueueStatsRequest) ProtoMessage() {}
+
+const Default_TaskQueueFetchQueueStatsRequest_MaxNumTasks int32 = 0
+
+func (m *TaskQueueFetchQueueStatsRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueueStatsRequest) GetQueueName() [][]byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueueStatsRequest) GetMaxNumTasks() int32 {
+ if m != nil && m.MaxNumTasks != nil {
+ return *m.MaxNumTasks
+ }
+ return Default_TaskQueueFetchQueueStatsRequest_MaxNumTasks
+}
+
+type TaskQueueScannerQueueInfo struct {
+ ExecutedLastMinute *int64 `protobuf:"varint,1,req,name=executed_last_minute" json:"executed_last_minute,omitempty"`
+ ExecutedLastHour *int64 `protobuf:"varint,2,req,name=executed_last_hour" json:"executed_last_hour,omitempty"`
+ SamplingDurationSeconds *float64 `protobuf:"fixed64,3,req,name=sampling_duration_seconds" json:"sampling_duration_seconds,omitempty"`
+ RequestsInFlight *int32 `protobuf:"varint,4,opt,name=requests_in_flight" json:"requests_in_flight,omitempty"`
+ EnforcedRate *float64 `protobuf:"fixed64,5,opt,name=enforced_rate" json:"enforced_rate,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueScannerQueueInfo) Reset() { *m = TaskQueueScannerQueueInfo{} }
+func (m *TaskQueueScannerQueueInfo) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueScannerQueueInfo) ProtoMessage() {}
+
+func (m *TaskQueueScannerQueueInfo) GetExecutedLastMinute() int64 {
+ if m != nil && m.ExecutedLastMinute != nil {
+ return *m.ExecutedLastMinute
+ }
+ return 0
+}
+
+func (m *TaskQueueScannerQueueInfo) GetExecutedLastHour() int64 {
+ if m != nil && m.ExecutedLastHour != nil {
+ return *m.ExecutedLastHour
+ }
+ return 0
+}
+
+func (m *TaskQueueScannerQueueInfo) GetSamplingDurationSeconds() float64 {
+ if m != nil && m.SamplingDurationSeconds != nil {
+ return *m.SamplingDurationSeconds
+ }
+ return 0
+}
+
+func (m *TaskQueueScannerQueueInfo) GetRequestsInFlight() int32 {
+ if m != nil && m.RequestsInFlight != nil {
+ return *m.RequestsInFlight
+ }
+ return 0
+}
+
+func (m *TaskQueueScannerQueueInfo) GetEnforcedRate() float64 {
+ if m != nil && m.EnforcedRate != nil {
+ return *m.EnforcedRate
+ }
+ return 0
+}
+
+type TaskQueueFetchQueueStatsResponse struct {
+ Queuestats []*TaskQueueFetchQueueStatsResponse_QueueStats `protobuf:"group,1,rep,name=QueueStats" json:"queuestats,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueueStatsResponse) Reset() { *m = TaskQueueFetchQueueStatsResponse{} }
+func (m *TaskQueueFetchQueueStatsResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueueStatsResponse) ProtoMessage() {}
+
+func (m *TaskQueueFetchQueueStatsResponse) GetQueuestats() []*TaskQueueFetchQueueStatsResponse_QueueStats {
+ if m != nil {
+ return m.Queuestats
+ }
+ return nil
+}
+
+type TaskQueueFetchQueueStatsResponse_QueueStats struct {
+ NumTasks *int32 `protobuf:"varint,2,req,name=num_tasks" json:"num_tasks,omitempty"`
+ OldestEtaUsec *int64 `protobuf:"varint,3,req,name=oldest_eta_usec" json:"oldest_eta_usec,omitempty"`
+ ScannerInfo *TaskQueueScannerQueueInfo `protobuf:"bytes,4,opt,name=scanner_info" json:"scanner_info,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) Reset() {
+ *m = TaskQueueFetchQueueStatsResponse_QueueStats{}
+}
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) String() string {
+ return proto.CompactTextString(m)
+}
+func (*TaskQueueFetchQueueStatsResponse_QueueStats) ProtoMessage() {}
+
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetNumTasks() int32 {
+ if m != nil && m.NumTasks != nil {
+ return *m.NumTasks
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetOldestEtaUsec() int64 {
+ if m != nil && m.OldestEtaUsec != nil {
+ return *m.OldestEtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetScannerInfo() *TaskQueueScannerQueueInfo {
+ if m != nil {
+ return m.ScannerInfo
+ }
+ return nil
+}
+
+type TaskQueuePauseQueueRequest struct {
+ AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ Pause *bool `protobuf:"varint,3,req,name=pause" json:"pause,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueuePauseQueueRequest) Reset() { *m = TaskQueuePauseQueueRequest{} }
+func (m *TaskQueuePauseQueueRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueuePauseQueueRequest) ProtoMessage() {}
+
+func (m *TaskQueuePauseQueueRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueuePauseQueueRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueuePauseQueueRequest) GetPause() bool {
+ if m != nil && m.Pause != nil {
+ return *m.Pause
+ }
+ return false
+}
+
+type TaskQueuePauseQueueResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueuePauseQueueResponse) Reset() { *m = TaskQueuePauseQueueResponse{} }
+func (m *TaskQueuePauseQueueResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueuePauseQueueResponse) ProtoMessage() {}
+
+type TaskQueuePurgeQueueRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueuePurgeQueueRequest) Reset() { *m = TaskQueuePurgeQueueRequest{} }
+func (m *TaskQueuePurgeQueueRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueuePurgeQueueRequest) ProtoMessage() {}
+
+func (m *TaskQueuePurgeQueueRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueuePurgeQueueRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+type TaskQueuePurgeQueueResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueuePurgeQueueResponse) Reset() { *m = TaskQueuePurgeQueueResponse{} }
+func (m *TaskQueuePurgeQueueResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueuePurgeQueueResponse) ProtoMessage() {}
+
+type TaskQueueDeleteQueueRequest struct {
+ AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteQueueRequest) Reset() { *m = TaskQueueDeleteQueueRequest{} }
+func (m *TaskQueueDeleteQueueRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteQueueRequest) ProtoMessage() {}
+
+func (m *TaskQueueDeleteQueueRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueDeleteQueueRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+type TaskQueueDeleteQueueResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteQueueResponse) Reset() { *m = TaskQueueDeleteQueueResponse{} }
+func (m *TaskQueueDeleteQueueResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteQueueResponse) ProtoMessage() {}
+
+type TaskQueueDeleteGroupRequest struct {
+ AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteGroupRequest) Reset() { *m = TaskQueueDeleteGroupRequest{} }
+func (m *TaskQueueDeleteGroupRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteGroupRequest) ProtoMessage() {}
+
+func (m *TaskQueueDeleteGroupRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type TaskQueueDeleteGroupResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteGroupResponse) Reset() { *m = TaskQueueDeleteGroupResponse{} }
+func (m *TaskQueueDeleteGroupResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteGroupResponse) ProtoMessage() {}
+
+type TaskQueueQueryTasksRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ StartTaskName []byte `protobuf:"bytes,3,opt,name=start_task_name" json:"start_task_name,omitempty"`
+ StartEtaUsec *int64 `protobuf:"varint,4,opt,name=start_eta_usec" json:"start_eta_usec,omitempty"`
+ StartTag []byte `protobuf:"bytes,6,opt,name=start_tag" json:"start_tag,omitempty"`
+ MaxRows *int32 `protobuf:"varint,5,opt,name=max_rows,def=1" json:"max_rows,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksRequest) Reset() { *m = TaskQueueQueryTasksRequest{} }
+func (m *TaskQueueQueryTasksRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksRequest) ProtoMessage() {}
+
+const Default_TaskQueueQueryTasksRequest_MaxRows int32 = 1
+
+func (m *TaskQueueQueryTasksRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksRequest) GetStartTaskName() []byte {
+ if m != nil {
+ return m.StartTaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksRequest) GetStartEtaUsec() int64 {
+ if m != nil && m.StartEtaUsec != nil {
+ return *m.StartEtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksRequest) GetStartTag() []byte {
+ if m != nil {
+ return m.StartTag
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksRequest) GetMaxRows() int32 {
+ if m != nil && m.MaxRows != nil {
+ return *m.MaxRows
+ }
+ return Default_TaskQueueQueryTasksRequest_MaxRows
+}
+
+type TaskQueueQueryTasksResponse struct {
+ Task []*TaskQueueQueryTasksResponse_Task `protobuf:"group,1,rep" json:"task,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse) Reset() { *m = TaskQueueQueryTasksResponse{} }
+func (m *TaskQueueQueryTasksResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksResponse) ProtoMessage() {}
+
+func (m *TaskQueueQueryTasksResponse) GetTask() []*TaskQueueQueryTasksResponse_Task {
+ if m != nil {
+ return m.Task
+ }
+ return nil
+}
+
+type TaskQueueQueryTasksResponse_Task struct {
+ TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
+ EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
+ Url []byte `protobuf:"bytes,4,opt,name=url" json:"url,omitempty"`
+ Method *TaskQueueQueryTasksResponse_Task_RequestMethod `protobuf:"varint,5,opt,name=method,enum=appengine.TaskQueueQueryTasksResponse_Task_RequestMethod" json:"method,omitempty"`
+ RetryCount *int32 `protobuf:"varint,6,opt,name=retry_count,def=0" json:"retry_count,omitempty"`
+ Header []*TaskQueueQueryTasksResponse_Task_Header `protobuf:"group,7,rep" json:"header,omitempty"`
+ BodySize *int32 `protobuf:"varint,10,opt,name=body_size" json:"body_size,omitempty"`
+ Body []byte `protobuf:"bytes,11,opt,name=body" json:"body,omitempty"`
+ CreationTimeUsec *int64 `protobuf:"varint,12,req,name=creation_time_usec" json:"creation_time_usec,omitempty"`
+ Crontimetable *TaskQueueQueryTasksResponse_Task_CronTimetable `protobuf:"group,13,opt,name=CronTimetable" json:"crontimetable,omitempty"`
+ Runlog *TaskQueueQueryTasksResponse_Task_RunLog `protobuf:"group,16,opt,name=RunLog" json:"runlog,omitempty"`
+ Description []byte `protobuf:"bytes,21,opt,name=description" json:"description,omitempty"`
+ Payload *TaskPayload `protobuf:"bytes,22,opt,name=payload" json:"payload,omitempty"`
+ RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,23,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
+ FirstTryUsec *int64 `protobuf:"varint,24,opt,name=first_try_usec" json:"first_try_usec,omitempty"`
+ Tag []byte `protobuf:"bytes,25,opt,name=tag" json:"tag,omitempty"`
+ ExecutionCount *int32 `protobuf:"varint,26,opt,name=execution_count,def=0" json:"execution_count,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) Reset() { *m = TaskQueueQueryTasksResponse_Task{} }
+func (m *TaskQueueQueryTasksResponse_Task) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksResponse_Task) ProtoMessage() {}
+
+const Default_TaskQueueQueryTasksResponse_Task_RetryCount int32 = 0
+const Default_TaskQueueQueryTasksResponse_Task_ExecutionCount int32 = 0
+
+func (m *TaskQueueQueryTasksResponse_Task) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetEtaUsec() int64 {
+ if m != nil && m.EtaUsec != nil {
+ return *m.EtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetUrl() []byte {
+ if m != nil {
+ return m.Url
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetMethod() TaskQueueQueryTasksResponse_Task_RequestMethod {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return TaskQueueQueryTasksResponse_Task_GET
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetRetryCount() int32 {
+ if m != nil && m.RetryCount != nil {
+ return *m.RetryCount
+ }
+ return Default_TaskQueueQueryTasksResponse_Task_RetryCount
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetHeader() []*TaskQueueQueryTasksResponse_Task_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetBodySize() int32 {
+ if m != nil && m.BodySize != nil {
+ return *m.BodySize
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetBody() []byte {
+ if m != nil {
+ return m.Body
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetCreationTimeUsec() int64 {
+ if m != nil && m.CreationTimeUsec != nil {
+ return *m.CreationTimeUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetCrontimetable() *TaskQueueQueryTasksResponse_Task_CronTimetable {
+ if m != nil {
+ return m.Crontimetable
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetRunlog() *TaskQueueQueryTasksResponse_Task_RunLog {
+ if m != nil {
+ return m.Runlog
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetDescription() []byte {
+ if m != nil {
+ return m.Description
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetPayload() *TaskPayload {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetRetryParameters() *TaskQueueRetryParameters {
+ if m != nil {
+ return m.RetryParameters
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetFirstTryUsec() int64 {
+ if m != nil && m.FirstTryUsec != nil {
+ return *m.FirstTryUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetTag() []byte {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetExecutionCount() int32 {
+ if m != nil && m.ExecutionCount != nil {
+ return *m.ExecutionCount
+ }
+ return Default_TaskQueueQueryTasksResponse_Task_ExecutionCount
+}
+
+type TaskQueueQueryTasksResponse_Task_Header struct {
+ Key []byte `protobuf:"bytes,8,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,9,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_Header) Reset() {
+ *m = TaskQueueQueryTasksResponse_Task_Header{}
+}
+func (m *TaskQueueQueryTasksResponse_Task_Header) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksResponse_Task_Header) ProtoMessage() {}
+
+func (m *TaskQueueQueryTasksResponse_Task_Header) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_Header) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type TaskQueueQueryTasksResponse_Task_CronTimetable struct {
+ Schedule []byte `protobuf:"bytes,14,req,name=schedule" json:"schedule,omitempty"`
+ Timezone []byte `protobuf:"bytes,15,req,name=timezone" json:"timezone,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) Reset() {
+ *m = TaskQueueQueryTasksResponse_Task_CronTimetable{}
+}
+func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) String() string {
+ return proto.CompactTextString(m)
+}
+func (*TaskQueueQueryTasksResponse_Task_CronTimetable) ProtoMessage() {}
+
+func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) GetSchedule() []byte {
+ if m != nil {
+ return m.Schedule
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) GetTimezone() []byte {
+ if m != nil {
+ return m.Timezone
+ }
+ return nil
+}
+
+type TaskQueueQueryTasksResponse_Task_RunLog struct {
+ DispatchedUsec *int64 `protobuf:"varint,17,req,name=dispatched_usec" json:"dispatched_usec,omitempty"`
+ LagUsec *int64 `protobuf:"varint,18,req,name=lag_usec" json:"lag_usec,omitempty"`
+ ElapsedUsec *int64 `protobuf:"varint,19,req,name=elapsed_usec" json:"elapsed_usec,omitempty"`
+ ResponseCode *int64 `protobuf:"varint,20,opt,name=response_code" json:"response_code,omitempty"`
+ RetryReason *string `protobuf:"bytes,27,opt,name=retry_reason" json:"retry_reason,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) Reset() {
+ *m = TaskQueueQueryTasksResponse_Task_RunLog{}
+}
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksResponse_Task_RunLog) ProtoMessage() {}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetDispatchedUsec() int64 {
+ if m != nil && m.DispatchedUsec != nil {
+ return *m.DispatchedUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetLagUsec() int64 {
+ if m != nil && m.LagUsec != nil {
+ return *m.LagUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetElapsedUsec() int64 {
+ if m != nil && m.ElapsedUsec != nil {
+ return *m.ElapsedUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetResponseCode() int64 {
+ if m != nil && m.ResponseCode != nil {
+ return *m.ResponseCode
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetRetryReason() string {
+ if m != nil && m.RetryReason != nil {
+ return *m.RetryReason
+ }
+ return ""
+}
+
+type TaskQueueFetchTaskRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName []byte `protobuf:"bytes,3,req,name=task_name" json:"task_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchTaskRequest) Reset() { *m = TaskQueueFetchTaskRequest{} }
+func (m *TaskQueueFetchTaskRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchTaskRequest) ProtoMessage() {}
+
+func (m *TaskQueueFetchTaskRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchTaskRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchTaskRequest) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+type TaskQueueFetchTaskResponse struct {
+ Task *TaskQueueQueryTasksResponse `protobuf:"bytes,1,req,name=task" json:"task,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchTaskResponse) Reset() { *m = TaskQueueFetchTaskResponse{} }
+func (m *TaskQueueFetchTaskResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchTaskResponse) ProtoMessage() {}
+
+func (m *TaskQueueFetchTaskResponse) GetTask() *TaskQueueQueryTasksResponse {
+ if m != nil {
+ return m.Task
+ }
+ return nil
+}
+
+type TaskQueueUpdateStorageLimitRequest struct {
+ AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ Limit *int64 `protobuf:"varint,2,req,name=limit" json:"limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueUpdateStorageLimitRequest) Reset() { *m = TaskQueueUpdateStorageLimitRequest{} }
+func (m *TaskQueueUpdateStorageLimitRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueUpdateStorageLimitRequest) ProtoMessage() {}
+
+func (m *TaskQueueUpdateStorageLimitRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateStorageLimitRequest) GetLimit() int64 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+type TaskQueueUpdateStorageLimitResponse struct {
+ NewLimit *int64 `protobuf:"varint,1,req,name=new_limit" json:"new_limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueUpdateStorageLimitResponse) Reset() { *m = TaskQueueUpdateStorageLimitResponse{} }
+func (m *TaskQueueUpdateStorageLimitResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueUpdateStorageLimitResponse) ProtoMessage() {}
+
+func (m *TaskQueueUpdateStorageLimitResponse) GetNewLimit() int64 {
+ if m != nil && m.NewLimit != nil {
+ return *m.NewLimit
+ }
+ return 0
+}
+
+type TaskQueueQueryAndOwnTasksRequest struct {
+ QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
+ LeaseSeconds *float64 `protobuf:"fixed64,2,req,name=lease_seconds" json:"lease_seconds,omitempty"`
+ MaxTasks *int64 `protobuf:"varint,3,req,name=max_tasks" json:"max_tasks,omitempty"`
+ GroupByTag *bool `protobuf:"varint,4,opt,name=group_by_tag,def=0" json:"group_by_tag,omitempty"`
+ Tag []byte `protobuf:"bytes,5,opt,name=tag" json:"tag,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) Reset() { *m = TaskQueueQueryAndOwnTasksRequest{} }
+func (m *TaskQueueQueryAndOwnTasksRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryAndOwnTasksRequest) ProtoMessage() {}
+
+const Default_TaskQueueQueryAndOwnTasksRequest_GroupByTag bool = false
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetLeaseSeconds() float64 {
+ if m != nil && m.LeaseSeconds != nil {
+ return *m.LeaseSeconds
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetMaxTasks() int64 {
+ if m != nil && m.MaxTasks != nil {
+ return *m.MaxTasks
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetGroupByTag() bool {
+ if m != nil && m.GroupByTag != nil {
+ return *m.GroupByTag
+ }
+ return Default_TaskQueueQueryAndOwnTasksRequest_GroupByTag
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetTag() []byte {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
+
+type TaskQueueQueryAndOwnTasksResponse struct {
+ Task []*TaskQueueQueryAndOwnTasksResponse_Task `protobuf:"group,1,rep" json:"task,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse) Reset() { *m = TaskQueueQueryAndOwnTasksResponse{} }
+func (m *TaskQueueQueryAndOwnTasksResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryAndOwnTasksResponse) ProtoMessage() {}
+
+func (m *TaskQueueQueryAndOwnTasksResponse) GetTask() []*TaskQueueQueryAndOwnTasksResponse_Task {
+ if m != nil {
+ return m.Task
+ }
+ return nil
+}
+
+type TaskQueueQueryAndOwnTasksResponse_Task struct {
+ TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
+ EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
+ RetryCount *int32 `protobuf:"varint,4,opt,name=retry_count,def=0" json:"retry_count,omitempty"`
+ Body []byte `protobuf:"bytes,5,opt,name=body" json:"body,omitempty"`
+ Tag []byte `protobuf:"bytes,6,opt,name=tag" json:"tag,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) Reset() {
+ *m = TaskQueueQueryAndOwnTasksResponse_Task{}
+}
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryAndOwnTasksResponse_Task) ProtoMessage() {}
+
+const Default_TaskQueueQueryAndOwnTasksResponse_Task_RetryCount int32 = 0
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetEtaUsec() int64 {
+ if m != nil && m.EtaUsec != nil {
+ return *m.EtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetRetryCount() int32 {
+ if m != nil && m.RetryCount != nil {
+ return *m.RetryCount
+ }
+ return Default_TaskQueueQueryAndOwnTasksResponse_Task_RetryCount
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetBody() []byte {
+ if m != nil {
+ return m.Body
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetTag() []byte {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
+
+type TaskQueueModifyTaskLeaseRequest struct {
+ QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
+ EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
+ LeaseSeconds *float64 `protobuf:"fixed64,4,req,name=lease_seconds" json:"lease_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueModifyTaskLeaseRequest) Reset() { *m = TaskQueueModifyTaskLeaseRequest{} }
+func (m *TaskQueueModifyTaskLeaseRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueModifyTaskLeaseRequest) ProtoMessage() {}
+
+func (m *TaskQueueModifyTaskLeaseRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueModifyTaskLeaseRequest) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueModifyTaskLeaseRequest) GetEtaUsec() int64 {
+ if m != nil && m.EtaUsec != nil {
+ return *m.EtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueModifyTaskLeaseRequest) GetLeaseSeconds() float64 {
+ if m != nil && m.LeaseSeconds != nil {
+ return *m.LeaseSeconds
+ }
+ return 0
+}
+
+type TaskQueueModifyTaskLeaseResponse struct {
+ UpdatedEtaUsec *int64 `protobuf:"varint,1,req,name=updated_eta_usec" json:"updated_eta_usec,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueModifyTaskLeaseResponse) Reset() { *m = TaskQueueModifyTaskLeaseResponse{} }
+func (m *TaskQueueModifyTaskLeaseResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueModifyTaskLeaseResponse) ProtoMessage() {}
+
+func (m *TaskQueueModifyTaskLeaseResponse) GetUpdatedEtaUsec() int64 {
+ if m != nil && m.UpdatedEtaUsec != nil {
+ return *m.UpdatedEtaUsec
+ }
+ return 0
+}
+
+func init() {
+}
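+
+// Illustrative sketch (not part of the generated upstream file): the generated
+// getters are nil-safe and fall back to the proto2 defaults declared above, so
+// callers can read optional fields without checking pointers themselves.
+func exampleLeaseRequestDefaults() {
+	req := &TaskQueueQueryAndOwnTasksRequest{
+		QueueName:    []byte("pull-queue"),
+		LeaseSeconds: proto.Float64(30),
+		MaxTasks:     proto.Int64(10),
+	}
+	_ = req.GetGroupByTag() // false: the field is unset, so the declared default applies.
+
+	// Getters are also safe on a nil receiver.
+	var task *TaskQueueQueryAndOwnTasksResponse_Task
+	_ = task.GetRetryCount() // 0 (Default_TaskQueueQueryAndOwnTasksResponse_Task_RetryCount)
+}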
diff --git a/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
new file mode 100644
index 000000000..419aaf570
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
@@ -0,0 +1,342 @@
+syntax = "proto2";
+option go_package = "taskqueue";
+
+import "google.golang.org/appengine/internal/datastore/datastore_v3.proto";
+
+package appengine;
+
+message TaskQueueServiceError {
+ enum ErrorCode {
+ OK = 0;
+ UNKNOWN_QUEUE = 1;
+ TRANSIENT_ERROR = 2;
+ INTERNAL_ERROR = 3;
+ TASK_TOO_LARGE = 4;
+ INVALID_TASK_NAME = 5;
+ INVALID_QUEUE_NAME = 6;
+ INVALID_URL = 7;
+ INVALID_QUEUE_RATE = 8;
+ PERMISSION_DENIED = 9;
+ TASK_ALREADY_EXISTS = 10;
+ TOMBSTONED_TASK = 11;
+ INVALID_ETA = 12;
+ INVALID_REQUEST = 13;
+ UNKNOWN_TASK = 14;
+ TOMBSTONED_QUEUE = 15;
+ DUPLICATE_TASK_NAME = 16;
+ SKIPPED = 17;
+ TOO_MANY_TASKS = 18;
+ INVALID_PAYLOAD = 19;
+ INVALID_RETRY_PARAMETERS = 20;
+ INVALID_QUEUE_MODE = 21;
+ ACL_LOOKUP_ERROR = 22;
+ TRANSACTIONAL_REQUEST_TOO_LARGE = 23;
+ INCORRECT_CREATOR_NAME = 24;
+ TASK_LEASE_EXPIRED = 25;
+ QUEUE_PAUSED = 26;
+ INVALID_TAG = 27;
+
+ // Reserved range for the Datastore error codes.
+ // Original Datastore error code is shifted by DATASTORE_ERROR offset.
+ DATASTORE_ERROR = 10000;
+ }
+}
+
+message TaskPayload {
+ extensions 10 to max;
+ option message_set_wire_format = true;
+}
+
+message TaskQueueRetryParameters {
+ optional int32 retry_limit = 1;
+ optional int64 age_limit_sec = 2;
+
+ optional double min_backoff_sec = 3 [default = 0.1];
+ optional double max_backoff_sec = 4 [default = 3600];
+ optional int32 max_doublings = 5 [default = 16];
+}
+
+message TaskQueueAcl {
+ repeated bytes user_email = 1;
+ repeated bytes writer_email = 2;
+}
+
+message TaskQueueHttpHeader {
+ required bytes key = 1;
+ required bytes value = 2;
+}
+
+message TaskQueueMode {
+ enum Mode {
+ PUSH = 0;
+ PULL = 1;
+ }
+}
+
+message TaskQueueAddRequest {
+ required bytes queue_name = 1;
+ required bytes task_name = 2;
+ required int64 eta_usec = 3;
+
+ enum RequestMethod {
+ GET = 1;
+ POST = 2;
+ HEAD = 3;
+ PUT = 4;
+ DELETE = 5;
+ }
+ optional RequestMethod method = 5 [default=POST];
+
+ optional bytes url = 4;
+
+ repeated group Header = 6 {
+ required bytes key = 7;
+ required bytes value = 8;
+ }
+
+ optional bytes body = 9 [ctype=CORD];
+ optional Transaction transaction = 10;
+ optional bytes app_id = 11;
+
+ optional group CronTimetable = 12 {
+ required bytes schedule = 13;
+ required bytes timezone = 14;
+ }
+
+ optional bytes description = 15;
+ optional TaskPayload payload = 16;
+ optional TaskQueueRetryParameters retry_parameters = 17;
+ optional TaskQueueMode.Mode mode = 18 [default=PUSH];
+ optional bytes tag = 19;
+}
+
+message TaskQueueAddResponse {
+ optional bytes chosen_task_name = 1;
+}
+
+message TaskQueueBulkAddRequest {
+ repeated TaskQueueAddRequest add_request = 1;
+}
+
+message TaskQueueBulkAddResponse {
+ repeated group TaskResult = 1 {
+ required TaskQueueServiceError.ErrorCode result = 2;
+ optional bytes chosen_task_name = 3;
+ }
+}
+
+message TaskQueueDeleteRequest {
+ required bytes queue_name = 1;
+ repeated bytes task_name = 2;
+ optional bytes app_id = 3;
+}
+
+message TaskQueueDeleteResponse {
+ repeated TaskQueueServiceError.ErrorCode result = 3;
+}
+
+message TaskQueueForceRunRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+ required bytes task_name = 3;
+}
+
+message TaskQueueForceRunResponse {
+ required TaskQueueServiceError.ErrorCode result = 3;
+}
+
+message TaskQueueUpdateQueueRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+ required double bucket_refill_per_second = 3;
+ required int32 bucket_capacity = 4;
+ optional string user_specified_rate = 5;
+ optional TaskQueueRetryParameters retry_parameters = 6;
+ optional int32 max_concurrent_requests = 7;
+ optional TaskQueueMode.Mode mode = 8 [default = PUSH];
+ optional TaskQueueAcl acl = 9;
+ repeated TaskQueueHttpHeader header_override = 10;
+}
+
+message TaskQueueUpdateQueueResponse {
+}
+
+message TaskQueueFetchQueuesRequest {
+ optional bytes app_id = 1;
+ required int32 max_rows = 2;
+}
+
+message TaskQueueFetchQueuesResponse {
+ repeated group Queue = 1 {
+ required bytes queue_name = 2;
+ required double bucket_refill_per_second = 3;
+ required double bucket_capacity = 4;
+ optional string user_specified_rate = 5;
+ required bool paused = 6 [default=false];
+ optional TaskQueueRetryParameters retry_parameters = 7;
+ optional int32 max_concurrent_requests = 8;
+ optional TaskQueueMode.Mode mode = 9 [default = PUSH];
+ optional TaskQueueAcl acl = 10;
+ repeated TaskQueueHttpHeader header_override = 11;
+ optional string creator_name = 12 [ctype=CORD, default="apphosting"];
+ }
+}
+
+message TaskQueueFetchQueueStatsRequest {
+ optional bytes app_id = 1;
+ repeated bytes queue_name = 2;
+ optional int32 max_num_tasks = 3 [default = 0];
+}
+
+message TaskQueueScannerQueueInfo {
+ required int64 executed_last_minute = 1;
+ required int64 executed_last_hour = 2;
+ required double sampling_duration_seconds = 3;
+ optional int32 requests_in_flight = 4;
+ optional double enforced_rate = 5;
+}
+
+message TaskQueueFetchQueueStatsResponse {
+ repeated group QueueStats = 1 {
+ required int32 num_tasks = 2;
+ required int64 oldest_eta_usec = 3;
+ optional TaskQueueScannerQueueInfo scanner_info = 4;
+ }
+}
+
+message TaskQueuePauseQueueRequest {
+ required bytes app_id = 1;
+ required bytes queue_name = 2;
+ required bool pause = 3;
+}
+
+message TaskQueuePauseQueueResponse {
+}
+
+message TaskQueuePurgeQueueRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+}
+
+message TaskQueuePurgeQueueResponse {
+}
+
+message TaskQueueDeleteQueueRequest {
+ required bytes app_id = 1;
+ required bytes queue_name = 2;
+}
+
+message TaskQueueDeleteQueueResponse {
+}
+
+message TaskQueueDeleteGroupRequest {
+ required bytes app_id = 1;
+}
+
+message TaskQueueDeleteGroupResponse {
+}
+
+message TaskQueueQueryTasksRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+
+ optional bytes start_task_name = 3;
+ optional int64 start_eta_usec = 4;
+ optional bytes start_tag = 6;
+ optional int32 max_rows = 5 [default = 1];
+}
+
+message TaskQueueQueryTasksResponse {
+ repeated group Task = 1 {
+ required bytes task_name = 2;
+ required int64 eta_usec = 3;
+ optional bytes url = 4;
+
+ enum RequestMethod {
+ GET = 1;
+ POST = 2;
+ HEAD = 3;
+ PUT = 4;
+ DELETE = 5;
+ }
+ optional RequestMethod method = 5;
+
+ optional int32 retry_count = 6 [default=0];
+
+ repeated group Header = 7 {
+ required bytes key = 8;
+ required bytes value = 9;
+ }
+
+ optional int32 body_size = 10;
+ optional bytes body = 11 [ctype=CORD];
+ required int64 creation_time_usec = 12;
+
+ optional group CronTimetable = 13 {
+ required bytes schedule = 14;
+ required bytes timezone = 15;
+ }
+
+ optional group RunLog = 16 {
+ required int64 dispatched_usec = 17;
+ required int64 lag_usec = 18;
+ required int64 elapsed_usec = 19;
+ optional int64 response_code = 20;
+ optional string retry_reason = 27;
+ }
+
+ optional bytes description = 21;
+ optional TaskPayload payload = 22;
+ optional TaskQueueRetryParameters retry_parameters = 23;
+ optional int64 first_try_usec = 24;
+ optional bytes tag = 25;
+ optional int32 execution_count = 26 [default=0];
+ }
+}
+
+message TaskQueueFetchTaskRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+ required bytes task_name = 3;
+}
+
+message TaskQueueFetchTaskResponse {
+ required TaskQueueQueryTasksResponse task = 1;
+}
+
+message TaskQueueUpdateStorageLimitRequest {
+ required bytes app_id = 1;
+ required int64 limit = 2;
+}
+
+message TaskQueueUpdateStorageLimitResponse {
+ required int64 new_limit = 1;
+}
+
+message TaskQueueQueryAndOwnTasksRequest {
+ required bytes queue_name = 1;
+ required double lease_seconds = 2;
+ required int64 max_tasks = 3;
+ optional bool group_by_tag = 4 [default=false];
+ optional bytes tag = 5;
+}
+
+message TaskQueueQueryAndOwnTasksResponse {
+ repeated group Task = 1 {
+ required bytes task_name = 2;
+ required int64 eta_usec = 3;
+ optional int32 retry_count = 4 [default=0];
+ optional bytes body = 5 [ctype=CORD];
+ optional bytes tag = 6;
+ }
+}
+
+message TaskQueueModifyTaskLeaseRequest {
+ required bytes queue_name = 1;
+ required bytes task_name = 2;
+ required int64 eta_usec = 3;
+ required double lease_seconds = 4;
+}
+
+message TaskQueueModifyTaskLeaseResponse {
+ required int64 updated_eta_usec = 1;
+}
diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go
new file mode 100644
index 000000000..d8e063e3e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/transaction.go
@@ -0,0 +1,103 @@
+package internal
+
+// This file implements hooks for applying datastore transactions.
+
+import (
+ "errors"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var transactionSetters = make(map[reflect.Type]reflect.Value)
+
+// RegisterTransactionSetter registers a function that sets transaction information
+// in a protocol buffer message. f should be a function with two arguments,
+// the first being a protocol buffer type, and the second being *datastore.Transaction.
+func RegisterTransactionSetter(f interface{}) {
+ v := reflect.ValueOf(f)
+ transactionSetters[v.Type().In(0)] = v
+}
+
+// applyTransaction applies the transaction t to message pb
+// by using the relevant setter passed to RegisterTransactionSetter.
+func applyTransaction(pb proto.Message, t *pb.Transaction) {
+ v := reflect.ValueOf(pb)
+ if f, ok := transactionSetters[v.Type()]; ok {
+ f.Call([]reflect.Value{v, reflect.ValueOf(t)})
+ }
+}
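+
+// Illustrative sketch (not part of the upstream file): a package whose request
+// messages carry a datastore transaction registers a setter once, typically in
+// an init function; the setter's first argument type is the lookup key used by
+// applyTransaction. exampleTxnRequest is a stand-in for a generated message.
+type exampleTxnRequest struct {
+	Transaction *pb.Transaction
+}
+
+func (r *exampleTxnRequest) Reset()         { *r = exampleTxnRequest{} }
+func (r *exampleTxnRequest) String() string { return "internal.exampleTxnRequest" }
+func (*exampleTxnRequest) ProtoMessage()    {}
+
+func exampleRegisterSetter() {
+	RegisterTransactionSetter(func(x *exampleTxnRequest, t *pb.Transaction) {
+		x.Transaction = t
+	})
+}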
+
+var transactionKey = "used for *Transaction"
+
+func transactionFromContext(ctx netcontext.Context) *transaction {
+ t, _ := ctx.Value(&transactionKey).(*transaction)
+ return t
+}
+
+func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
+ return netcontext.WithValue(ctx, &transactionKey, t)
+}
+
+type transaction struct {
+ transaction pb.Transaction
+ finished bool
+}
+
+var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
+
+func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool) error {
+ if transactionFromContext(c) != nil {
+ return errors.New("nested transactions are not supported")
+ }
+
+ // Begin the transaction.
+ t := &transaction{}
+ req := &pb.BeginTransactionRequest{
+ App: proto.String(FullyQualifiedAppID(c)),
+ }
+ if xg {
+ req.AllowMultipleEg = proto.Bool(true)
+ }
+ if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
+ return err
+ }
+
+ // Call f, rolling back the transaction if f returns a non-nil error, or panics.
+ // The panic is not recovered.
+ defer func() {
+ if t.finished {
+ return
+ }
+ t.finished = true
+ // Ignore the error return value, since we are already returning a non-nil
+ // error (or we're panicking).
+ Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
+ }()
+ if err := f(withTransaction(c, t)); err != nil {
+ return err
+ }
+ t.finished = true
+
+ // Commit the transaction.
+ res := &pb.CommitResponse{}
+ err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
+ if ae, ok := err.(*APIError); ok {
+ /* TODO: restore this conditional
+ if appengine.IsDevAppServer() {
+ */
+ // The Python Dev AppServer raises an ApplicationError with error code 2 (which is
+ // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
+ if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
+ return ErrConcurrentTransaction
+ }
+ if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
+ return ErrConcurrentTransaction
+ }
+ }
+ return err
+}
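+
+// Illustrative sketch (not part of the upstream file): a caller makes a single
+// transaction attempt; RunTransactionOnce rolls back if f returns an error or
+// panics, and otherwise commits, reporting commit conflicts as
+// ErrConcurrentTransaction so the caller can decide to retry.
+func exampleRunOnce(ctx netcontext.Context) error {
+	err := RunTransactionOnce(ctx, func(tc netcontext.Context) error {
+		// Issue datastore calls with tc here; applyTransaction attaches the
+		// transaction to request types that have a registered setter.
+		return nil
+	}, false) // xg=false: keep the transaction within a single entity group.
+	if err == ErrConcurrentTransaction {
+		// A caller would typically loop and re-invoke RunTransactionOnce here.
+	}
+	return err
+}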
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
new file mode 100644
index 000000000..e559522c6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
@@ -0,0 +1,353 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+// DO NOT EDIT!
+
+/*
+Package urlfetch is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+
+It has these top-level messages:
+ URLFetchServiceError
+ URLFetchRequest
+ URLFetchResponse
+*/
+package urlfetch
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type URLFetchServiceError_ErrorCode int32
+
+const (
+ URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0
+ URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1
+ URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2
+ URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3
+ URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4
+ URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5
+ URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6
+ URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7
+ URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8
+ URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9
+ URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10
+ URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11
+ URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12
+)
+
+var URLFetchServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_URL",
+ 2: "FETCH_ERROR",
+ 3: "UNSPECIFIED_ERROR",
+ 4: "RESPONSE_TOO_LARGE",
+ 5: "DEADLINE_EXCEEDED",
+ 6: "SSL_CERTIFICATE_ERROR",
+ 7: "DNS_ERROR",
+ 8: "CLOSED",
+ 9: "INTERNAL_TRANSIENT_ERROR",
+ 10: "TOO_MANY_REDIRECTS",
+ 11: "MALFORMED_REPLY",
+ 12: "CONNECTION_ERROR",
+}
+var URLFetchServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_URL": 1,
+ "FETCH_ERROR": 2,
+ "UNSPECIFIED_ERROR": 3,
+ "RESPONSE_TOO_LARGE": 4,
+ "DEADLINE_EXCEEDED": 5,
+ "SSL_CERTIFICATE_ERROR": 6,
+ "DNS_ERROR": 7,
+ "CLOSED": 8,
+ "INTERNAL_TRANSIENT_ERROR": 9,
+ "TOO_MANY_REDIRECTS": 10,
+ "MALFORMED_REPLY": 11,
+ "CONNECTION_ERROR": 12,
+}
+
+func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode {
+ p := new(URLFetchServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x URLFetchServiceError_ErrorCode) String() string {
+ return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x))
+}
+func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = URLFetchServiceError_ErrorCode(value)
+ return nil
+}
+
+type URLFetchRequest_RequestMethod int32
+
+const (
+ URLFetchRequest_GET URLFetchRequest_RequestMethod = 1
+ URLFetchRequest_POST URLFetchRequest_RequestMethod = 2
+ URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3
+ URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4
+ URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5
+ URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6
+)
+
+var URLFetchRequest_RequestMethod_name = map[int32]string{
+ 1: "GET",
+ 2: "POST",
+ 3: "HEAD",
+ 4: "PUT",
+ 5: "DELETE",
+ 6: "PATCH",
+}
+var URLFetchRequest_RequestMethod_value = map[string]int32{
+ "GET": 1,
+ "POST": 2,
+ "HEAD": 3,
+ "PUT": 4,
+ "DELETE": 5,
+ "PATCH": 6,
+}
+
+func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod {
+ p := new(URLFetchRequest_RequestMethod)
+ *p = x
+ return p
+}
+func (x URLFetchRequest_RequestMethod) String() string {
+ return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x))
+}
+func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod")
+ if err != nil {
+ return err
+ }
+ *x = URLFetchRequest_RequestMethod(value)
+ return nil
+}
+
+type URLFetchServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} }
+func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }
+func (*URLFetchServiceError) ProtoMessage() {}
+
+type URLFetchRequest struct {
+ Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"`
+ Url *string `protobuf:"bytes,2,req" json:"Url,omitempty"`
+ Header []*URLFetchRequest_Header `protobuf:"group,3,rep" json:"header,omitempty"`
+ Payload []byte `protobuf:"bytes,6,opt" json:"Payload,omitempty"`
+ FollowRedirects *bool `protobuf:"varint,7,opt,def=1" json:"FollowRedirects,omitempty"`
+ Deadline *float64 `protobuf:"fixed64,8,opt" json:"Deadline,omitempty"`
+ MustValidateServerCertificate *bool `protobuf:"varint,9,opt,def=1" json:"MustValidateServerCertificate,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} }
+func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest) ProtoMessage() {}
+
+const Default_URLFetchRequest_FollowRedirects bool = true
+const Default_URLFetchRequest_MustValidateServerCertificate bool = true
+
+func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return URLFetchRequest_GET
+}
+
+func (m *URLFetchRequest) GetUrl() string {
+ if m != nil && m.Url != nil {
+ return *m.Url
+ }
+ return ""
+}
+
+func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *URLFetchRequest) GetPayload() []byte {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *URLFetchRequest) GetFollowRedirects() bool {
+ if m != nil && m.FollowRedirects != nil {
+ return *m.FollowRedirects
+ }
+ return Default_URLFetchRequest_FollowRedirects
+}
+
+func (m *URLFetchRequest) GetDeadline() float64 {
+ if m != nil && m.Deadline != nil {
+ return *m.Deadline
+ }
+ return 0
+}
+
+func (m *URLFetchRequest) GetMustValidateServerCertificate() bool {
+ if m != nil && m.MustValidateServerCertificate != nil {
+ return *m.MustValidateServerCertificate
+ }
+ return Default_URLFetchRequest_MustValidateServerCertificate
+}
+
+type URLFetchRequest_Header struct {
+ Key *string `protobuf:"bytes,4,req" json:"Key,omitempty"`
+ Value *string `protobuf:"bytes,5,req" json:"Value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} }
+func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest_Header) ProtoMessage() {}
+
+func (m *URLFetchRequest_Header) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *URLFetchRequest_Header) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type URLFetchResponse struct {
+ Content []byte `protobuf:"bytes,1,opt" json:"Content,omitempty"`
+ StatusCode *int32 `protobuf:"varint,2,req" json:"StatusCode,omitempty"`
+ Header []*URLFetchResponse_Header `protobuf:"group,3,rep" json:"header,omitempty"`
+ ContentWasTruncated *bool `protobuf:"varint,6,opt,def=0" json:"ContentWasTruncated,omitempty"`
+ ExternalBytesSent *int64 `protobuf:"varint,7,opt" json:"ExternalBytesSent,omitempty"`
+ ExternalBytesReceived *int64 `protobuf:"varint,8,opt" json:"ExternalBytesReceived,omitempty"`
+ FinalUrl *string `protobuf:"bytes,9,opt" json:"FinalUrl,omitempty"`
+ ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,def=0" json:"ApiCpuMilliseconds,omitempty"`
+ ApiBytesSent *int64 `protobuf:"varint,11,opt,def=0" json:"ApiBytesSent,omitempty"`
+ ApiBytesReceived *int64 `protobuf:"varint,12,opt,def=0" json:"ApiBytesReceived,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} }
+func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse) ProtoMessage() {}
+
+const Default_URLFetchResponse_ContentWasTruncated bool = false
+const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0
+const Default_URLFetchResponse_ApiBytesSent int64 = 0
+const Default_URLFetchResponse_ApiBytesReceived int64 = 0
+
+func (m *URLFetchResponse) GetContent() []byte {
+ if m != nil {
+ return m.Content
+ }
+ return nil
+}
+
+func (m *URLFetchResponse) GetStatusCode() int32 {
+ if m != nil && m.StatusCode != nil {
+ return *m.StatusCode
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *URLFetchResponse) GetContentWasTruncated() bool {
+ if m != nil && m.ContentWasTruncated != nil {
+ return *m.ContentWasTruncated
+ }
+ return Default_URLFetchResponse_ContentWasTruncated
+}
+
+func (m *URLFetchResponse) GetExternalBytesSent() int64 {
+ if m != nil && m.ExternalBytesSent != nil {
+ return *m.ExternalBytesSent
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetExternalBytesReceived() int64 {
+ if m != nil && m.ExternalBytesReceived != nil {
+ return *m.ExternalBytesReceived
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetFinalUrl() string {
+ if m != nil && m.FinalUrl != nil {
+ return *m.FinalUrl
+ }
+ return ""
+}
+
+func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 {
+ if m != nil && m.ApiCpuMilliseconds != nil {
+ return *m.ApiCpuMilliseconds
+ }
+ return Default_URLFetchResponse_ApiCpuMilliseconds
+}
+
+func (m *URLFetchResponse) GetApiBytesSent() int64 {
+ if m != nil && m.ApiBytesSent != nil {
+ return *m.ApiBytesSent
+ }
+ return Default_URLFetchResponse_ApiBytesSent
+}
+
+func (m *URLFetchResponse) GetApiBytesReceived() int64 {
+ if m != nil && m.ApiBytesReceived != nil {
+ return *m.ApiBytesReceived
+ }
+ return Default_URLFetchResponse_ApiBytesReceived
+}
+
+type URLFetchResponse_Header struct {
+ Key *string `protobuf:"bytes,4,req" json:"Key,omitempty"`
+ Value *string `protobuf:"bytes,5,req" json:"Value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} }
+func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse_Header) ProtoMessage() {}
+
+func (m *URLFetchResponse_Header) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *URLFetchResponse_Header) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+func init() {
+}
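+
+// Illustrative sketch (not part of the generated upstream file): optional
+// fields left unset report their declared proto2 defaults through the
+// generated getters, so redirects are followed and server certificates are
+// validated unless a request explicitly opts out.
+func exampleFetchRequestDefaults() *URLFetchRequest {
+	req := &URLFetchRequest{
+		Method: URLFetchRequest_GET.Enum(),
+		Url:    proto.String("https://example.com/"),
+	}
+	_ = req.GetFollowRedirects()               // true (Default_URLFetchRequest_FollowRedirects)
+	_ = req.GetMustValidateServerCertificate() // true
+	_ = req.GetDeadline()                      // 0: Deadline declares no default
+	return req
+}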
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
new file mode 100644
index 000000000..f695edf6a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
@@ -0,0 +1,64 @@
+syntax = "proto2";
+option go_package = "urlfetch";
+
+package appengine;
+
+message URLFetchServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_URL = 1;
+ FETCH_ERROR = 2;
+ UNSPECIFIED_ERROR = 3;
+ RESPONSE_TOO_LARGE = 4;
+ DEADLINE_EXCEEDED = 5;
+ SSL_CERTIFICATE_ERROR = 6;
+ DNS_ERROR = 7;
+ CLOSED = 8;
+ INTERNAL_TRANSIENT_ERROR = 9;
+ TOO_MANY_REDIRECTS = 10;
+ MALFORMED_REPLY = 11;
+ CONNECTION_ERROR = 12;
+ }
+}
+
+message URLFetchRequest {
+ enum RequestMethod {
+ GET = 1;
+ POST = 2;
+ HEAD = 3;
+ PUT = 4;
+ DELETE = 5;
+ PATCH = 6;
+ }
+ required RequestMethod Method = 1;
+ required string Url = 2;
+ repeated group Header = 3 {
+ required string Key = 4;
+ required string Value = 5;
+ }
+ optional bytes Payload = 6 [ctype=CORD];
+
+ optional bool FollowRedirects = 7 [default=true];
+
+ optional double Deadline = 8;
+
+ optional bool MustValidateServerCertificate = 9 [default=true];
+}
+
+message URLFetchResponse {
+ optional bytes Content = 1;
+ required int32 StatusCode = 2;
+ repeated group Header = 3 {
+ required string Key = 4;
+ required string Value = 5;
+ }
+ optional bool ContentWasTruncated = 6 [default=false];
+ optional int64 ExternalBytesSent = 7;
+ optional int64 ExternalBytesReceived = 8;
+
+ optional string FinalUrl = 9;
+
+ optional int64 ApiCpuMilliseconds = 10 [default=0];
+ optional int64 ApiBytesSent = 11 [default=0];
+ optional int64 ApiBytesReceived = 12 [default=0];
+}
diff --git a/vendor/google.golang.org/appengine/internal/user/user_service.pb.go b/vendor/google.golang.org/appengine/internal/user/user_service.pb.go
new file mode 100644
index 000000000..67627d904
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/user/user_service.pb.go
@@ -0,0 +1,287 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/user/user_service.proto
+// DO NOT EDIT!
+
+/*
+Package user is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/user/user_service.proto
+
+It has these top-level messages:
+ UserServiceError
+ CreateLoginURLRequest
+ CreateLoginURLResponse
+ CreateLogoutURLRequest
+ CreateLogoutURLResponse
+ GetOAuthUserRequest
+ GetOAuthUserResponse
+ CheckOAuthSignatureRequest
+ CheckOAuthSignatureResponse
+*/
+package user
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type UserServiceError_ErrorCode int32
+
+const (
+ UserServiceError_OK UserServiceError_ErrorCode = 0
+ UserServiceError_REDIRECT_URL_TOO_LONG UserServiceError_ErrorCode = 1
+ UserServiceError_NOT_ALLOWED UserServiceError_ErrorCode = 2
+ UserServiceError_OAUTH_INVALID_TOKEN UserServiceError_ErrorCode = 3
+ UserServiceError_OAUTH_INVALID_REQUEST UserServiceError_ErrorCode = 4
+ UserServiceError_OAUTH_ERROR UserServiceError_ErrorCode = 5
+)
+
+var UserServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "REDIRECT_URL_TOO_LONG",
+ 2: "NOT_ALLOWED",
+ 3: "OAUTH_INVALID_TOKEN",
+ 4: "OAUTH_INVALID_REQUEST",
+ 5: "OAUTH_ERROR",
+}
+var UserServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "REDIRECT_URL_TOO_LONG": 1,
+ "NOT_ALLOWED": 2,
+ "OAUTH_INVALID_TOKEN": 3,
+ "OAUTH_INVALID_REQUEST": 4,
+ "OAUTH_ERROR": 5,
+}
+
+func (x UserServiceError_ErrorCode) Enum() *UserServiceError_ErrorCode {
+ p := new(UserServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x UserServiceError_ErrorCode) String() string {
+ return proto.EnumName(UserServiceError_ErrorCode_name, int32(x))
+}
+func (x *UserServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(UserServiceError_ErrorCode_value, data, "UserServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = UserServiceError_ErrorCode(value)
+ return nil
+}
+
+type UserServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UserServiceError) Reset() { *m = UserServiceError{} }
+func (m *UserServiceError) String() string { return proto.CompactTextString(m) }
+func (*UserServiceError) ProtoMessage() {}
+
+type CreateLoginURLRequest struct {
+ DestinationUrl *string `protobuf:"bytes,1,req,name=destination_url" json:"destination_url,omitempty"`
+ AuthDomain *string `protobuf:"bytes,2,opt,name=auth_domain" json:"auth_domain,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,3,opt,name=federated_identity,def=" json:"federated_identity,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateLoginURLRequest) Reset() { *m = CreateLoginURLRequest{} }
+func (m *CreateLoginURLRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateLoginURLRequest) ProtoMessage() {}
+
+func (m *CreateLoginURLRequest) GetDestinationUrl() string {
+ if m != nil && m.DestinationUrl != nil {
+ return *m.DestinationUrl
+ }
+ return ""
+}
+
+func (m *CreateLoginURLRequest) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *CreateLoginURLRequest) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+type CreateLoginURLResponse struct {
+ LoginUrl *string `protobuf:"bytes,1,req,name=login_url" json:"login_url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateLoginURLResponse) Reset() { *m = CreateLoginURLResponse{} }
+func (m *CreateLoginURLResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateLoginURLResponse) ProtoMessage() {}
+
+func (m *CreateLoginURLResponse) GetLoginUrl() string {
+ if m != nil && m.LoginUrl != nil {
+ return *m.LoginUrl
+ }
+ return ""
+}
+
+type CreateLogoutURLRequest struct {
+ DestinationUrl *string `protobuf:"bytes,1,req,name=destination_url" json:"destination_url,omitempty"`
+ AuthDomain *string `protobuf:"bytes,2,opt,name=auth_domain" json:"auth_domain,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateLogoutURLRequest) Reset() { *m = CreateLogoutURLRequest{} }
+func (m *CreateLogoutURLRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateLogoutURLRequest) ProtoMessage() {}
+
+func (m *CreateLogoutURLRequest) GetDestinationUrl() string {
+ if m != nil && m.DestinationUrl != nil {
+ return *m.DestinationUrl
+ }
+ return ""
+}
+
+func (m *CreateLogoutURLRequest) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+type CreateLogoutURLResponse struct {
+ LogoutUrl *string `protobuf:"bytes,1,req,name=logout_url" json:"logout_url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateLogoutURLResponse) Reset() { *m = CreateLogoutURLResponse{} }
+func (m *CreateLogoutURLResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateLogoutURLResponse) ProtoMessage() {}
+
+func (m *CreateLogoutURLResponse) GetLogoutUrl() string {
+ if m != nil && m.LogoutUrl != nil {
+ return *m.LogoutUrl
+ }
+ return ""
+}
+
+type GetOAuthUserRequest struct {
+ Scope *string `protobuf:"bytes,1,opt,name=scope" json:"scope,omitempty"`
+ Scopes []string `protobuf:"bytes,2,rep,name=scopes" json:"scopes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetOAuthUserRequest) Reset() { *m = GetOAuthUserRequest{} }
+func (m *GetOAuthUserRequest) String() string { return proto.CompactTextString(m) }
+func (*GetOAuthUserRequest) ProtoMessage() {}
+
+func (m *GetOAuthUserRequest) GetScope() string {
+ if m != nil && m.Scope != nil {
+ return *m.Scope
+ }
+ return ""
+}
+
+func (m *GetOAuthUserRequest) GetScopes() []string {
+ if m != nil {
+ return m.Scopes
+ }
+ return nil
+}
+
+type GetOAuthUserResponse struct {
+ Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
+ UserId *string `protobuf:"bytes,2,req,name=user_id" json:"user_id,omitempty"`
+ AuthDomain *string `protobuf:"bytes,3,req,name=auth_domain" json:"auth_domain,omitempty"`
+ UserOrganization *string `protobuf:"bytes,4,opt,name=user_organization,def=" json:"user_organization,omitempty"`
+ IsAdmin *bool `protobuf:"varint,5,opt,name=is_admin,def=0" json:"is_admin,omitempty"`
+ ClientId *string `protobuf:"bytes,6,opt,name=client_id,def=" json:"client_id,omitempty"`
+ Scopes []string `protobuf:"bytes,7,rep,name=scopes" json:"scopes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetOAuthUserResponse) Reset() { *m = GetOAuthUserResponse{} }
+func (m *GetOAuthUserResponse) String() string { return proto.CompactTextString(m) }
+func (*GetOAuthUserResponse) ProtoMessage() {}
+
+const Default_GetOAuthUserResponse_IsAdmin bool = false
+
+func (m *GetOAuthUserResponse) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetUserId() string {
+ if m != nil && m.UserId != nil {
+ return *m.UserId
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetUserOrganization() string {
+ if m != nil && m.UserOrganization != nil {
+ return *m.UserOrganization
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetIsAdmin() bool {
+ if m != nil && m.IsAdmin != nil {
+ return *m.IsAdmin
+ }
+ return Default_GetOAuthUserResponse_IsAdmin
+}
+
+func (m *GetOAuthUserResponse) GetClientId() string {
+ if m != nil && m.ClientId != nil {
+ return *m.ClientId
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetScopes() []string {
+ if m != nil {
+ return m.Scopes
+ }
+ return nil
+}
+
+type CheckOAuthSignatureRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CheckOAuthSignatureRequest) Reset() { *m = CheckOAuthSignatureRequest{} }
+func (m *CheckOAuthSignatureRequest) String() string { return proto.CompactTextString(m) }
+func (*CheckOAuthSignatureRequest) ProtoMessage() {}
+
+type CheckOAuthSignatureResponse struct {
+ OauthConsumerKey *string `protobuf:"bytes,1,req,name=oauth_consumer_key" json:"oauth_consumer_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CheckOAuthSignatureResponse) Reset() { *m = CheckOAuthSignatureResponse{} }
+func (m *CheckOAuthSignatureResponse) String() string { return proto.CompactTextString(m) }
+func (*CheckOAuthSignatureResponse) ProtoMessage() {}
+
+func (m *CheckOAuthSignatureResponse) GetOauthConsumerKey() string {
+ if m != nil && m.OauthConsumerKey != nil {
+ return *m.OauthConsumerKey
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/user/user_service.proto b/vendor/google.golang.org/appengine/internal/user/user_service.proto
new file mode 100644
index 000000000..f3e969346
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/user/user_service.proto
@@ -0,0 +1,58 @@
+syntax = "proto2";
+option go_package = "user";
+
+package appengine;
+
+message UserServiceError {
+ enum ErrorCode {
+ OK = 0;
+ REDIRECT_URL_TOO_LONG = 1;
+ NOT_ALLOWED = 2;
+ OAUTH_INVALID_TOKEN = 3;
+ OAUTH_INVALID_REQUEST = 4;
+ OAUTH_ERROR = 5;
+ }
+}
+
+message CreateLoginURLRequest {
+ required string destination_url = 1;
+ optional string auth_domain = 2;
+ optional string federated_identity = 3 [default = ""];
+}
+
+message CreateLoginURLResponse {
+ required string login_url = 1;
+}
+
+message CreateLogoutURLRequest {
+ required string destination_url = 1;
+ optional string auth_domain = 2;
+}
+
+message CreateLogoutURLResponse {
+ required string logout_url = 1;
+}
+
+message GetOAuthUserRequest {
+ optional string scope = 1;
+
+ repeated string scopes = 2;
+}
+
+message GetOAuthUserResponse {
+ required string email = 1;
+ required string user_id = 2;
+ required string auth_domain = 3;
+ optional string user_organization = 4 [default = ""];
+ optional bool is_admin = 5 [default = false];
+ optional string client_id = 6 [default = ""];
+
+ repeated string scopes = 7;
+}
+
+message CheckOAuthSignatureRequest {
+}
+
+message CheckOAuthSignatureResponse {
+ required string oauth_consumer_key = 1;
+}
diff --git a/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go
new file mode 100644
index 000000000..be1e9865a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go
@@ -0,0 +1,425 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/xmpp/xmpp_service.proto
+// DO NOT EDIT!
+
+/*
+Package xmpp is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/xmpp/xmpp_service.proto
+
+It has these top-level messages:
+ XmppServiceError
+ PresenceRequest
+ PresenceResponse
+ BulkPresenceRequest
+ BulkPresenceResponse
+ XmppMessageRequest
+ XmppMessageResponse
+ XmppSendPresenceRequest
+ XmppSendPresenceResponse
+ XmppInviteRequest
+ XmppInviteResponse
+*/
+package xmpp
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type XmppServiceError_ErrorCode int32
+
+const (
+ XmppServiceError_UNSPECIFIED_ERROR XmppServiceError_ErrorCode = 1
+ XmppServiceError_INVALID_JID XmppServiceError_ErrorCode = 2
+ XmppServiceError_NO_BODY XmppServiceError_ErrorCode = 3
+ XmppServiceError_INVALID_XML XmppServiceError_ErrorCode = 4
+ XmppServiceError_INVALID_TYPE XmppServiceError_ErrorCode = 5
+ XmppServiceError_INVALID_SHOW XmppServiceError_ErrorCode = 6
+ XmppServiceError_EXCEEDED_MAX_SIZE XmppServiceError_ErrorCode = 7
+ XmppServiceError_APPID_ALIAS_REQUIRED XmppServiceError_ErrorCode = 8
+ XmppServiceError_NONDEFAULT_MODULE XmppServiceError_ErrorCode = 9
+)
+
+var XmppServiceError_ErrorCode_name = map[int32]string{
+ 1: "UNSPECIFIED_ERROR",
+ 2: "INVALID_JID",
+ 3: "NO_BODY",
+ 4: "INVALID_XML",
+ 5: "INVALID_TYPE",
+ 6: "INVALID_SHOW",
+ 7: "EXCEEDED_MAX_SIZE",
+ 8: "APPID_ALIAS_REQUIRED",
+ 9: "NONDEFAULT_MODULE",
+}
+var XmppServiceError_ErrorCode_value = map[string]int32{
+ "UNSPECIFIED_ERROR": 1,
+ "INVALID_JID": 2,
+ "NO_BODY": 3,
+ "INVALID_XML": 4,
+ "INVALID_TYPE": 5,
+ "INVALID_SHOW": 6,
+ "EXCEEDED_MAX_SIZE": 7,
+ "APPID_ALIAS_REQUIRED": 8,
+ "NONDEFAULT_MODULE": 9,
+}
+
+func (x XmppServiceError_ErrorCode) Enum() *XmppServiceError_ErrorCode {
+ p := new(XmppServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x XmppServiceError_ErrorCode) String() string {
+ return proto.EnumName(XmppServiceError_ErrorCode_name, int32(x))
+}
+func (x *XmppServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(XmppServiceError_ErrorCode_value, data, "XmppServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = XmppServiceError_ErrorCode(value)
+ return nil
+}
+
+type PresenceResponse_SHOW int32
+
+const (
+ PresenceResponse_NORMAL PresenceResponse_SHOW = 0
+ PresenceResponse_AWAY PresenceResponse_SHOW = 1
+ PresenceResponse_DO_NOT_DISTURB PresenceResponse_SHOW = 2
+ PresenceResponse_CHAT PresenceResponse_SHOW = 3
+ PresenceResponse_EXTENDED_AWAY PresenceResponse_SHOW = 4
+)
+
+var PresenceResponse_SHOW_name = map[int32]string{
+ 0: "NORMAL",
+ 1: "AWAY",
+ 2: "DO_NOT_DISTURB",
+ 3: "CHAT",
+ 4: "EXTENDED_AWAY",
+}
+var PresenceResponse_SHOW_value = map[string]int32{
+ "NORMAL": 0,
+ "AWAY": 1,
+ "DO_NOT_DISTURB": 2,
+ "CHAT": 3,
+ "EXTENDED_AWAY": 4,
+}
+
+func (x PresenceResponse_SHOW) Enum() *PresenceResponse_SHOW {
+ p := new(PresenceResponse_SHOW)
+ *p = x
+ return p
+}
+func (x PresenceResponse_SHOW) String() string {
+ return proto.EnumName(PresenceResponse_SHOW_name, int32(x))
+}
+func (x *PresenceResponse_SHOW) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(PresenceResponse_SHOW_value, data, "PresenceResponse_SHOW")
+ if err != nil {
+ return err
+ }
+ *x = PresenceResponse_SHOW(value)
+ return nil
+}
+
+type XmppMessageResponse_XmppMessageStatus int32
+
+const (
+ XmppMessageResponse_NO_ERROR XmppMessageResponse_XmppMessageStatus = 0
+ XmppMessageResponse_INVALID_JID XmppMessageResponse_XmppMessageStatus = 1
+ XmppMessageResponse_OTHER_ERROR XmppMessageResponse_XmppMessageStatus = 2
+)
+
+var XmppMessageResponse_XmppMessageStatus_name = map[int32]string{
+ 0: "NO_ERROR",
+ 1: "INVALID_JID",
+ 2: "OTHER_ERROR",
+}
+var XmppMessageResponse_XmppMessageStatus_value = map[string]int32{
+ "NO_ERROR": 0,
+ "INVALID_JID": 1,
+ "OTHER_ERROR": 2,
+}
+
+func (x XmppMessageResponse_XmppMessageStatus) Enum() *XmppMessageResponse_XmppMessageStatus {
+ p := new(XmppMessageResponse_XmppMessageStatus)
+ *p = x
+ return p
+}
+func (x XmppMessageResponse_XmppMessageStatus) String() string {
+ return proto.EnumName(XmppMessageResponse_XmppMessageStatus_name, int32(x))
+}
+func (x *XmppMessageResponse_XmppMessageStatus) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(XmppMessageResponse_XmppMessageStatus_value, data, "XmppMessageResponse_XmppMessageStatus")
+ if err != nil {
+ return err
+ }
+ *x = XmppMessageResponse_XmppMessageStatus(value)
+ return nil
+}
+
+type XmppServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppServiceError) Reset() { *m = XmppServiceError{} }
+func (m *XmppServiceError) String() string { return proto.CompactTextString(m) }
+func (*XmppServiceError) ProtoMessage() {}
+
+type PresenceRequest struct {
+ Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"`
+ FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PresenceRequest) Reset() { *m = PresenceRequest{} }
+func (m *PresenceRequest) String() string { return proto.CompactTextString(m) }
+func (*PresenceRequest) ProtoMessage() {}
+
+func (m *PresenceRequest) GetJid() string {
+ if m != nil && m.Jid != nil {
+ return *m.Jid
+ }
+ return ""
+}
+
+func (m *PresenceRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type PresenceResponse struct {
+ IsAvailable *bool `protobuf:"varint,1,req,name=is_available" json:"is_available,omitempty"`
+ Presence *PresenceResponse_SHOW `protobuf:"varint,2,opt,name=presence,enum=appengine.PresenceResponse_SHOW" json:"presence,omitempty"`
+ Valid *bool `protobuf:"varint,3,opt,name=valid" json:"valid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PresenceResponse) Reset() { *m = PresenceResponse{} }
+func (m *PresenceResponse) String() string { return proto.CompactTextString(m) }
+func (*PresenceResponse) ProtoMessage() {}
+
+func (m *PresenceResponse) GetIsAvailable() bool {
+ if m != nil && m.IsAvailable != nil {
+ return *m.IsAvailable
+ }
+ return false
+}
+
+func (m *PresenceResponse) GetPresence() PresenceResponse_SHOW {
+ if m != nil && m.Presence != nil {
+ return *m.Presence
+ }
+ return PresenceResponse_NORMAL
+}
+
+func (m *PresenceResponse) GetValid() bool {
+ if m != nil && m.Valid != nil {
+ return *m.Valid
+ }
+ return false
+}
+
+type BulkPresenceRequest struct {
+ Jid []string `protobuf:"bytes,1,rep,name=jid" json:"jid,omitempty"`
+ FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BulkPresenceRequest) Reset() { *m = BulkPresenceRequest{} }
+func (m *BulkPresenceRequest) String() string { return proto.CompactTextString(m) }
+func (*BulkPresenceRequest) ProtoMessage() {}
+
+func (m *BulkPresenceRequest) GetJid() []string {
+ if m != nil {
+ return m.Jid
+ }
+ return nil
+}
+
+func (m *BulkPresenceRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type BulkPresenceResponse struct {
+ PresenceResponse []*PresenceResponse `protobuf:"bytes,1,rep,name=presence_response" json:"presence_response,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BulkPresenceResponse) Reset() { *m = BulkPresenceResponse{} }
+func (m *BulkPresenceResponse) String() string { return proto.CompactTextString(m) }
+func (*BulkPresenceResponse) ProtoMessage() {}
+
+func (m *BulkPresenceResponse) GetPresenceResponse() []*PresenceResponse {
+ if m != nil {
+ return m.PresenceResponse
+ }
+ return nil
+}
+
+type XmppMessageRequest struct {
+ Jid []string `protobuf:"bytes,1,rep,name=jid" json:"jid,omitempty"`
+ Body *string `protobuf:"bytes,2,req,name=body" json:"body,omitempty"`
+ RawXml *bool `protobuf:"varint,3,opt,name=raw_xml,def=0" json:"raw_xml,omitempty"`
+ Type *string `protobuf:"bytes,4,opt,name=type,def=chat" json:"type,omitempty"`
+ FromJid *string `protobuf:"bytes,5,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppMessageRequest) Reset() { *m = XmppMessageRequest{} }
+func (m *XmppMessageRequest) String() string { return proto.CompactTextString(m) }
+func (*XmppMessageRequest) ProtoMessage() {}
+
+const Default_XmppMessageRequest_RawXml bool = false
+const Default_XmppMessageRequest_Type string = "chat"
+
+func (m *XmppMessageRequest) GetJid() []string {
+ if m != nil {
+ return m.Jid
+ }
+ return nil
+}
+
+func (m *XmppMessageRequest) GetBody() string {
+ if m != nil && m.Body != nil {
+ return *m.Body
+ }
+ return ""
+}
+
+func (m *XmppMessageRequest) GetRawXml() bool {
+ if m != nil && m.RawXml != nil {
+ return *m.RawXml
+ }
+ return Default_XmppMessageRequest_RawXml
+}
+
+func (m *XmppMessageRequest) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_XmppMessageRequest_Type
+}
+
+func (m *XmppMessageRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type XmppMessageResponse struct {
+ Status []XmppMessageResponse_XmppMessageStatus `protobuf:"varint,1,rep,name=status,enum=appengine.XmppMessageResponse_XmppMessageStatus" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppMessageResponse) Reset() { *m = XmppMessageResponse{} }
+func (m *XmppMessageResponse) String() string { return proto.CompactTextString(m) }
+func (*XmppMessageResponse) ProtoMessage() {}
+
+func (m *XmppMessageResponse) GetStatus() []XmppMessageResponse_XmppMessageStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+type XmppSendPresenceRequest struct {
+ Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"`
+ Type *string `protobuf:"bytes,2,opt,name=type" json:"type,omitempty"`
+ Show *string `protobuf:"bytes,3,opt,name=show" json:"show,omitempty"`
+ Status *string `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"`
+ FromJid *string `protobuf:"bytes,5,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppSendPresenceRequest) Reset() { *m = XmppSendPresenceRequest{} }
+func (m *XmppSendPresenceRequest) String() string { return proto.CompactTextString(m) }
+func (*XmppSendPresenceRequest) ProtoMessage() {}
+
+func (m *XmppSendPresenceRequest) GetJid() string {
+ if m != nil && m.Jid != nil {
+ return *m.Jid
+ }
+ return ""
+}
+
+func (m *XmppSendPresenceRequest) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *XmppSendPresenceRequest) GetShow() string {
+ if m != nil && m.Show != nil {
+ return *m.Show
+ }
+ return ""
+}
+
+func (m *XmppSendPresenceRequest) GetStatus() string {
+ if m != nil && m.Status != nil {
+ return *m.Status
+ }
+ return ""
+}
+
+func (m *XmppSendPresenceRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type XmppSendPresenceResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppSendPresenceResponse) Reset() { *m = XmppSendPresenceResponse{} }
+func (m *XmppSendPresenceResponse) String() string { return proto.CompactTextString(m) }
+func (*XmppSendPresenceResponse) ProtoMessage() {}
+
+type XmppInviteRequest struct {
+ Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"`
+ FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppInviteRequest) Reset() { *m = XmppInviteRequest{} }
+func (m *XmppInviteRequest) String() string { return proto.CompactTextString(m) }
+func (*XmppInviteRequest) ProtoMessage() {}
+
+func (m *XmppInviteRequest) GetJid() string {
+ if m != nil && m.Jid != nil {
+ return *m.Jid
+ }
+ return ""
+}
+
+func (m *XmppInviteRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type XmppInviteResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppInviteResponse) Reset() { *m = XmppInviteResponse{} }
+func (m *XmppInviteResponse) String() string { return proto.CompactTextString(m) }
+func (*XmppInviteResponse) ProtoMessage() {}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto
new file mode 100644
index 000000000..472d52ebf
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto
@@ -0,0 +1,83 @@
+syntax = "proto2";
+option go_package = "xmpp";
+
+package appengine;
+
+message XmppServiceError {
+ enum ErrorCode {
+ UNSPECIFIED_ERROR = 1;
+ INVALID_JID = 2;
+ NO_BODY = 3;
+ INVALID_XML = 4;
+ INVALID_TYPE = 5;
+ INVALID_SHOW = 6;
+ EXCEEDED_MAX_SIZE = 7;
+ APPID_ALIAS_REQUIRED = 8;
+ NONDEFAULT_MODULE = 9;
+ }
+}
+
+message PresenceRequest {
+ required string jid = 1;
+ optional string from_jid = 2;
+}
+
+message PresenceResponse {
+ enum SHOW {
+ NORMAL = 0;
+ AWAY = 1;
+ DO_NOT_DISTURB = 2;
+ CHAT = 3;
+ EXTENDED_AWAY = 4;
+ }
+
+ required bool is_available = 1;
+ optional SHOW presence = 2;
+ optional bool valid = 3;
+}
+
+message BulkPresenceRequest {
+ repeated string jid = 1;
+ optional string from_jid = 2;
+}
+
+message BulkPresenceResponse {
+ repeated PresenceResponse presence_response = 1;
+}
+
+message XmppMessageRequest {
+ repeated string jid = 1;
+ required string body = 2;
+ optional bool raw_xml = 3 [ default = false ];
+ optional string type = 4 [ default = "chat" ];
+ optional string from_jid = 5;
+}
+
+message XmppMessageResponse {
+ enum XmppMessageStatus {
+ NO_ERROR = 0;
+ INVALID_JID = 1;
+ OTHER_ERROR = 2;
+ }
+
+ repeated XmppMessageStatus status = 1;
+}
+
+message XmppSendPresenceRequest {
+ required string jid = 1;
+ optional string type = 2;
+ optional string show = 3;
+ optional string status = 4;
+ optional string from_jid = 5;
+}
+
+message XmppSendPresenceResponse {
+}
+
+message XmppInviteRequest {
+ required string jid = 1;
+ optional string from_jid = 2;
+}
+
+message XmppInviteResponse {
+}
diff --git a/vendor/google.golang.org/appengine/log/api.go b/vendor/google.golang.org/appengine/log/api.go
new file mode 100644
index 000000000..24d58601b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/log/api.go
@@ -0,0 +1,40 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package log
+
+// This file implements the logging API.
+
+import (
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// Debugf formats its arguments according to the format, analogous to fmt.Printf,
+// and records the text as a log message at Debug level. The message will be associated
+// with the request linked with the provided context.
+func Debugf(ctx context.Context, format string, args ...interface{}) {
+ internal.Logf(ctx, 0, format, args...)
+}
+
+// Infof is like Debugf, but at Info level.
+func Infof(ctx context.Context, format string, args ...interface{}) {
+ internal.Logf(ctx, 1, format, args...)
+}
+
+// Warningf is like Debugf, but at Warning level.
+func Warningf(ctx context.Context, format string, args ...interface{}) {
+ internal.Logf(ctx, 2, format, args...)
+}
+
+// Errorf is like Debugf, but at Error level.
+func Errorf(ctx context.Context, format string, args ...interface{}) {
+ internal.Logf(ctx, 3, format, args...)
+}
+
+// Criticalf is like Debugf, but at Critical level.
+func Criticalf(ctx context.Context, format string, args ...interface{}) {
+ internal.Logf(ctx, 4, format, args...)
+}
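+
+// A minimal usage sketch: a handler typically derives a context from the
+// incoming request and logs against it, so the entries are attached to that
+// request's log.
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		ctx := appengine.NewContext(r)
+//		log.Infof(ctx, "serving %s", r.URL.Path)
+//	}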
diff --git a/vendor/google.golang.org/appengine/log/log.go b/vendor/google.golang.org/appengine/log/log.go
new file mode 100644
index 000000000..cd89e5ced
--- /dev/null
+++ b/vendor/google.golang.org/appengine/log/log.go
@@ -0,0 +1,323 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package log provides the means of querying an application's logs from
+within an App Engine application.
+
+Example:
+ c := appengine.NewContext(r)
+ query := &log.Query{
+ AppLogs: true,
+ Versions: []string{"1"},
+ }
+
+ for results := query.Run(c); ; {
+ record, err := results.Next()
+ if err == log.Done {
+ log.Infof(c, "Done processing results")
+ break
+ }
+ if err != nil {
+ log.Errorf(c, "Failed to retrieve next log: %v", err)
+ break
+ }
+ log.Infof(c, "Saw record %v", record)
+ }
+*/
+package log
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/log"
+)
+
+// Query defines a logs query.
+type Query struct {
+ // Start time specifies the earliest log to return (inclusive).
+ StartTime time.Time
+
+ // End time specifies the latest log to return (exclusive).
+ EndTime time.Time
+
+ // Offset specifies a position within the log stream to resume reading from,
+ // and should come from a previously returned Record's field of the same name.
+ Offset []byte
+
+ // Incomplete controls whether active (incomplete) requests should be included.
+ Incomplete bool
+
+ // AppLogs indicates if application-level logs should be included.
+ AppLogs bool
+
+ // ApplyMinLevel indicates if MinLevel should be used to filter results.
+ ApplyMinLevel bool
+
+ // If ApplyMinLevel is true, only logs for requests with at least one
+ // application log of MinLevel or higher will be returned.
+ MinLevel int
+
+ // Versions is the major version IDs whose logs should be retrieved.
+	// Logs for specific modules can be retrieved by specifying versions
+ // in the form "module:version"; the default module is used if no module
+ // is specified.
+ Versions []string
+
+ // A list of requests to search for instead of a time-based scan. Cannot be
+ // combined with filtering options such as StartTime, EndTime, Offset,
+ // Incomplete, ApplyMinLevel, or Versions.
+ RequestIDs []string
+}
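+
+// A sketch of resuming a scan from a prior position (lastRecord is
+// illustrative): copy the Offset of the last Record seen into a fresh Query
+// before calling Run again.
+//
+//	q := &log.Query{
+//		AppLogs: true,
+//		Offset:  lastRecord.Offset,
+//	}
+//	results := q.Run(c)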
+
+// AppLog represents a single application-level log.
+type AppLog struct {
+ Time time.Time
+ Level int
+ Message string
+}
+
+// Record contains all the information for a single web request.
+type Record struct {
+ AppID string
+ ModuleID string
+ VersionID string
+ RequestID []byte
+ IP string
+ Nickname string
+ AppEngineRelease string
+
+ // The time when this request started.
+ StartTime time.Time
+
+ // The time when this request finished.
+ EndTime time.Time
+
+ // Opaque cursor into the result stream.
+ Offset []byte
+
+ // The time required to process the request.
+ Latency time.Duration
+ MCycles int64
+ Method string
+ Resource string
+ HTTPVersion string
+ Status int32
+
+	// The size of the response sent back to the client, in bytes.
+ ResponseSize int64
+ Referrer string
+ UserAgent string
+ URLMapEntry string
+ Combined string
+ Host string
+
+ // The estimated cost of this request, in dollars.
+ Cost float64
+ TaskQueueName string
+ TaskName string
+ WasLoadingRequest bool
+ PendingTime time.Duration
+ Finished bool
+ AppLogs []AppLog
+
+	// Mostly-unique identifier for the instance that handled the request, if available.
+ InstanceID string
+}
+
+// Result represents the result of a query.
+type Result struct {
+ logs []*Record
+ context context.Context
+ request *pb.LogReadRequest
+ resultsSeen bool
+ err error
+}
+
+// Next returns the next log record, or Done as the error when there are no
+// more results.
+func (qr *Result) Next() (*Record, error) {
+ if qr.err != nil {
+ return nil, qr.err
+ }
+ if len(qr.logs) > 0 {
+ lr := qr.logs[0]
+ qr.logs = qr.logs[1:]
+ return lr, nil
+ }
+
+ if qr.request.Offset == nil && qr.resultsSeen {
+ return nil, Done
+ }
+
+ if err := qr.run(); err != nil {
+ // Errors here may be retried, so don't store the error.
+ return nil, err
+ }
+
+ return qr.Next()
+}
+
+// Done is returned when a query iteration has completed.
+var Done = errors.New("log: query has no more results")
+
+// protoToAppLogs takes as input an array of pointers to LogLines, the internal
+// Protocol Buffer representation of a single application-level log,
+// and converts it to an array of AppLogs, the external representation
+// of an application-level log.
+func protoToAppLogs(logLines []*pb.LogLine) []AppLog {
+ appLogs := make([]AppLog, len(logLines))
+
+ for i, line := range logLines {
+ appLogs[i] = AppLog{
+ Time: time.Unix(0, *line.Time*1e3),
+ Level: int(*line.Level),
+ Message: *line.LogMessage,
+ }
+ }
+
+ return appLogs
+}
+
+// protoToRecord converts a RequestLog, the internal Protocol Buffer
+// representation of a single request-level log, to a Record, its
+// corresponding external representation.
+func protoToRecord(rl *pb.RequestLog) *Record {
+ offset, err := proto.Marshal(rl.Offset)
+ if err != nil {
+ offset = nil
+ }
+ return &Record{
+ AppID: *rl.AppId,
+ ModuleID: rl.GetModuleId(),
+ VersionID: *rl.VersionId,
+ RequestID: rl.RequestId,
+ Offset: offset,
+ IP: *rl.Ip,
+ Nickname: rl.GetNickname(),
+ AppEngineRelease: string(rl.GetAppEngineRelease()),
+ StartTime: time.Unix(0, *rl.StartTime*1e3),
+ EndTime: time.Unix(0, *rl.EndTime*1e3),
+ Latency: time.Duration(*rl.Latency) * time.Microsecond,
+ MCycles: *rl.Mcycles,
+ Method: *rl.Method,
+ Resource: *rl.Resource,
+ HTTPVersion: *rl.HttpVersion,
+ Status: *rl.Status,
+ ResponseSize: *rl.ResponseSize,
+ Referrer: rl.GetReferrer(),
+ UserAgent: rl.GetUserAgent(),
+ URLMapEntry: *rl.UrlMapEntry,
+ Combined: *rl.Combined,
+ Host: rl.GetHost(),
+ Cost: rl.GetCost(),
+ TaskQueueName: rl.GetTaskQueueName(),
+ TaskName: rl.GetTaskName(),
+ WasLoadingRequest: rl.GetWasLoadingRequest(),
+ PendingTime: time.Duration(rl.GetPendingTime()) * time.Microsecond,
+ Finished: rl.GetFinished(),
+ AppLogs: protoToAppLogs(rl.Line),
+ InstanceID: string(rl.GetCloneKey()),
+ }
+}
+
+// Run starts a query for log records, which contain request and application
+// level log information.
+func (params *Query) Run(c context.Context) *Result {
+ req, err := makeRequest(params, internal.FullyQualifiedAppID(c), appengine.VersionID(c))
+ return &Result{
+ context: c,
+ request: req,
+ err: err,
+ }
+}
+
+func makeRequest(params *Query, appID, versionID string) (*pb.LogReadRequest, error) {
+ req := &pb.LogReadRequest{}
+ req.AppId = &appID
+ if !params.StartTime.IsZero() {
+ req.StartTime = proto.Int64(params.StartTime.UnixNano() / 1e3)
+ }
+ if !params.EndTime.IsZero() {
+ req.EndTime = proto.Int64(params.EndTime.UnixNano() / 1e3)
+ }
+ if len(params.Offset) > 0 {
+ var offset pb.LogOffset
+ if err := proto.Unmarshal(params.Offset, &offset); err != nil {
+ return nil, fmt.Errorf("bad Offset: %v", err)
+ }
+ req.Offset = &offset
+ }
+ if params.Incomplete {
+ req.IncludeIncomplete = ¶ms.Incomplete
+ }
+ if params.AppLogs {
+ req.IncludeAppLogs = ¶ms.AppLogs
+ }
+ if params.ApplyMinLevel {
+ req.MinimumLogLevel = proto.Int32(int32(params.MinLevel))
+ }
+ if params.Versions == nil {
+ // If no versions were specified, default to the default module at
+ // the major version being used by this module.
+ if i := strings.Index(versionID, "."); i >= 0 {
+ versionID = versionID[:i]
+ }
+ req.VersionId = []string{versionID}
+ } else {
+ req.ModuleVersion = make([]*pb.LogModuleVersion, 0, len(params.Versions))
+ for _, v := range params.Versions {
+ var m *string
+ if i := strings.Index(v, ":"); i >= 0 {
+ m, v = proto.String(v[:i]), v[i+1:]
+ }
+ req.ModuleVersion = append(req.ModuleVersion, &pb.LogModuleVersion{
+ ModuleId: m,
+ VersionId: proto.String(v),
+ })
+ }
+ }
+ if params.RequestIDs != nil {
+ ids := make([][]byte, len(params.RequestIDs))
+ for i, v := range params.RequestIDs {
+ ids[i] = []byte(v)
+ }
+ req.RequestId = ids
+ }
+
+ return req, nil
+}
+
+// run takes the query Result produced by a call to Run and updates it with
+// more Records. The updated Result contains a new set of logs as well as an
+// offset to where more logs can be found. We also convert the items in the
+// response from their internal representations to external versions of the
+// same structs.
+func (r *Result) run() error {
+ res := &pb.LogReadResponse{}
+ if err := internal.Call(r.context, "logservice", "Read", r.request, res); err != nil {
+ return err
+ }
+
+ r.logs = make([]*Record, len(res.Log))
+ r.request.Offset = res.Offset
+ r.resultsSeen = true
+
+ for i, log := range res.Log {
+ r.logs[i] = protoToRecord(log)
+ }
+
+ return nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("logservice", pb.LogServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/mail/mail.go b/vendor/google.golang.org/appengine/mail/mail.go
new file mode 100644
index 000000000..be0eb518c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/mail/mail.go
@@ -0,0 +1,123 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package mail provides the means of sending email from an
+App Engine application.
+
+Example:
+ msg := &mail.Message{
+ Sender: "romeo@montague.com",
+		To:      []string{"Juliet <juliet@capulet.org>"},
+ Subject: "See you tonight",
+ Body: "Don't forget our plans. Hark, 'til later.",
+ }
+ if err := mail.Send(c, msg); err != nil {
+ log.Errorf(c, "Alas, my user, the email failed to sendeth: %v", err)
+ }
+*/
+package mail
+
+import (
+ "net/mail"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ bpb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/mail"
+)
+
+// A Message represents an email message.
+// Addresses may be of any form permitted by RFC 822.
+type Message struct {
+ // Sender must be set, and must be either an application admin
+ // or the currently signed-in user.
+ Sender string
+ ReplyTo string // may be empty
+
+ // At least one of these slices must have a non-zero length,
+ // except when calling SendToAdmins.
+ To, Cc, Bcc []string
+
+ Subject string
+
+ // At least one of Body or HTMLBody must be non-empty.
+ Body string
+ HTMLBody string
+
+ Attachments []Attachment
+
+ // Extra mail headers.
+ // See https://cloud.google.com/appengine/docs/go/mail/
+ // for permissible headers.
+ Headers mail.Header
+}
+
+// An Attachment represents an email attachment.
+type Attachment struct {
+ // Name must be set to a valid file name.
+ Name string
+ Data []byte
+ ContentID string
+}
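+
+// A sketch of attaching a file to an outgoing message (the addresses and the
+// csvBytes variable are illustrative):
+//
+//	msg := &mail.Message{
+//		Sender:  "admin@example.com",
+//		To:      []string{"user@example.com"},
+//		Subject: "Your report",
+//		Body:    "The report is attached.",
+//		Attachments: []mail.Attachment{
+//			{Name: "report.csv", Data: csvBytes},
+//		},
+//	}
+//	if err := mail.Send(c, msg); err != nil {
+//		// handle the error
+//	}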
+
+// Send sends an email message.
+func Send(c context.Context, msg *Message) error {
+ return send(c, "Send", msg)
+}
+
+// SendToAdmins sends an email message to the application's administrators.
+func SendToAdmins(c context.Context, msg *Message) error {
+ return send(c, "SendToAdmins", msg)
+}
+
+func send(c context.Context, method string, msg *Message) error {
+ req := &pb.MailMessage{
+ Sender: &msg.Sender,
+ To: msg.To,
+ Cc: msg.Cc,
+ Bcc: msg.Bcc,
+ Subject: &msg.Subject,
+ }
+ if msg.ReplyTo != "" {
+ req.ReplyTo = &msg.ReplyTo
+ }
+ if msg.Body != "" {
+ req.TextBody = &msg.Body
+ }
+ if msg.HTMLBody != "" {
+ req.HtmlBody = &msg.HTMLBody
+ }
+ if len(msg.Attachments) > 0 {
+ req.Attachment = make([]*pb.MailAttachment, len(msg.Attachments))
+ for i, att := range msg.Attachments {
+ req.Attachment[i] = &pb.MailAttachment{
+ FileName: proto.String(att.Name),
+ Data: att.Data,
+ }
+ if att.ContentID != "" {
+ req.Attachment[i].ContentID = proto.String(att.ContentID)
+ }
+ }
+ }
+ for key, vs := range msg.Headers {
+ for _, v := range vs {
+ req.Header = append(req.Header, &pb.MailHeader{
+ Name: proto.String(key),
+ Value: proto.String(v),
+ })
+ }
+ }
+ res := &bpb.VoidProto{}
+ if err := internal.Call(c, "mail", method, req, res); err != nil {
+ return err
+ }
+ return nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("mail", pb.MailServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/memcache/memcache.go b/vendor/google.golang.org/appengine/memcache/memcache.go
new file mode 100644
index 000000000..515505504
--- /dev/null
+++ b/vendor/google.golang.org/appengine/memcache/memcache.go
@@ -0,0 +1,526 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package memcache provides a client for App Engine's distributed in-memory
+// key-value store for small chunks of arbitrary data.
+//
+// The fundamental operations get and set items, keyed by a string.
+//
+// item0, err := memcache.Get(c, "key")
+// if err != nil && err != memcache.ErrCacheMiss {
+// return err
+// }
+// if err == nil {
+// fmt.Fprintf(w, "memcache hit: Key=%q Val=[% x]\n", item0.Key, item0.Value)
+// } else {
+// fmt.Fprintf(w, "memcache miss\n")
+// }
+//
+// and
+//
+// item1 := &memcache.Item{
+// Key: "foo",
+// Value: []byte("bar"),
+// }
+// if err := memcache.Set(c, item1); err != nil {
+// return err
+// }
+package memcache
+
+import (
+ "bytes"
+ "encoding/gob"
+ "encoding/json"
+ "errors"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/memcache"
+)
+
+var (
+ // ErrCacheMiss means that an operation failed
+ // because the item wasn't present.
+ ErrCacheMiss = errors.New("memcache: cache miss")
+ // ErrCASConflict means that a CompareAndSwap call failed due to the
+ // cached value being modified between the Get and the CompareAndSwap.
+ // If the cached value was simply evicted rather than replaced,
+ // ErrNotStored will be returned instead.
+ ErrCASConflict = errors.New("memcache: compare-and-swap conflict")
+ // ErrNoStats means that no statistics were available.
+ ErrNoStats = errors.New("memcache: no statistics available")
+ // ErrNotStored means that a conditional write operation (i.e. Add or
+ // CompareAndSwap) failed because the condition was not satisfied.
+ ErrNotStored = errors.New("memcache: item not stored")
+ // ErrServerError means that a server error occurred.
+ ErrServerError = errors.New("memcache: server error")
+)
+
+// Item is the unit of memcache gets and sets.
+type Item struct {
+ // Key is the Item's key (250 bytes maximum).
+ Key string
+ // Value is the Item's value.
+ Value []byte
+ // Object is the Item's value for use with a Codec.
+ Object interface{}
+ // Flags are server-opaque flags whose semantics are entirely up to the
+ // App Engine app.
+ Flags uint32
+ // Expiration is the maximum duration that the item will stay
+ // in the cache.
+ // The zero value means the Item has no expiration time.
+ // Subsecond precision is ignored.
+ // This is not set when getting items.
+ Expiration time.Duration
+ // casID is a client-opaque value used for compare-and-swap operations.
+ // Zero means that compare-and-swap is not used.
+ casID uint64
+}
+
+const (
+ secondsIn30Years = 60 * 60 * 24 * 365 * 30 // from memcache server code
+ thirtyYears = time.Duration(secondsIn30Years) * time.Second
+)
+
+// protoToItem converts a protocol buffer item to a Go struct.
+func protoToItem(p *pb.MemcacheGetResponse_Item) *Item {
+ return &Item{
+ Key: string(p.Key),
+ Value: p.Value,
+ Flags: p.GetFlags(),
+ casID: p.GetCasId(),
+ }
+}
+
+// If err is an appengine.MultiError, return its first element. Otherwise, return err.
+func singleError(err error) error {
+ if me, ok := err.(appengine.MultiError); ok {
+ return me[0]
+ }
+ return err
+}
+
+// Get gets the item for the given key. ErrCacheMiss is returned for a memcache
+// cache miss. The key must be at most 250 bytes in length.
+func Get(c context.Context, key string) (*Item, error) {
+ m, err := GetMulti(c, []string{key})
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := m[key]; !ok {
+ return nil, ErrCacheMiss
+ }
+ return m[key], nil
+}
+
+// GetMulti is a batch version of Get. The returned map from keys to items may
+// have fewer elements than the input slice, due to memcache cache misses.
+// Each key must be at most 250 bytes in length.
+func GetMulti(c context.Context, key []string) (map[string]*Item, error) {
+ if len(key) == 0 {
+ return nil, nil
+ }
+ keyAsBytes := make([][]byte, len(key))
+ for i, k := range key {
+ keyAsBytes[i] = []byte(k)
+ }
+ req := &pb.MemcacheGetRequest{
+ Key: keyAsBytes,
+ ForCas: proto.Bool(true),
+ }
+ res := &pb.MemcacheGetResponse{}
+ if err := internal.Call(c, "memcache", "Get", req, res); err != nil {
+ return nil, err
+ }
+ m := make(map[string]*Item, len(res.Item))
+ for _, p := range res.Item {
+ t := protoToItem(p)
+ m[t.Key] = t
+ }
+ return m, nil
+}
+
+// Delete deletes the item for the given key.
+// ErrCacheMiss is returned if the specified item can not be found.
+// The key must be at most 250 bytes in length.
+func Delete(c context.Context, key string) error {
+ return singleError(DeleteMulti(c, []string{key}))
+}
+
+// DeleteMulti is a batch version of Delete.
+// If any keys cannot be found, an appengine.MultiError is returned.
+// Each key must be at most 250 bytes in length.
+func DeleteMulti(c context.Context, key []string) error {
+ if len(key) == 0 {
+ return nil
+ }
+ req := &pb.MemcacheDeleteRequest{
+ Item: make([]*pb.MemcacheDeleteRequest_Item, len(key)),
+ }
+ for i, k := range key {
+ req.Item[i] = &pb.MemcacheDeleteRequest_Item{Key: []byte(k)}
+ }
+ res := &pb.MemcacheDeleteResponse{}
+ if err := internal.Call(c, "memcache", "Delete", req, res); err != nil {
+ return err
+ }
+ if len(res.DeleteStatus) != len(key) {
+ return ErrServerError
+ }
+ me, any := make(appengine.MultiError, len(key)), false
+ for i, s := range res.DeleteStatus {
+ switch s {
+ case pb.MemcacheDeleteResponse_DELETED:
+ // OK
+ case pb.MemcacheDeleteResponse_NOT_FOUND:
+ me[i] = ErrCacheMiss
+ any = true
+ default:
+ me[i] = ErrServerError
+ any = true
+ }
+ }
+ if any {
+ return me
+ }
+ return nil
+}
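+
+// A sketch of inspecting per-key results of a batch delete; keys that were
+// not found surface as ErrCacheMiss entries of an appengine.MultiError, in
+// the same order as the input keys.
+//
+//	if err := DeleteMulti(c, keys); err != nil {
+//		if me, ok := err.(appengine.MultiError); ok {
+//			for i, e := range me {
+//				if e == ErrCacheMiss {
+//					// keys[i] was already absent
+//				}
+//			}
+//		}
+//	}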
+
+// Increment atomically increments the decimal value in the given key
+// by delta and returns the new value. The value must fit in a uint64.
+// Overflow wraps around, and underflow is capped to zero. The
+// provided delta may be negative. If the key doesn't exist in
+// memcache, the provided initial value is used to atomically
+// populate it before the delta is applied.
+// The key must be at most 250 bytes in length.
+func Increment(c context.Context, key string, delta int64, initialValue uint64) (newValue uint64, err error) {
+ return incr(c, key, delta, &initialValue)
+}
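+
+// A sketch of a simple counter built on Increment (the key name is
+// illustrative); the initial value of 0 seeds the key the first time it is
+// incremented.
+//
+//	views, err := Increment(c, "views:"+page, 1, 0)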
+
+// IncrementExisting works like Increment but assumes that the key
+// already exists in memcache and doesn't take an initial value.
+// IncrementExisting can save work if calculating the initial value is
+// expensive.
+// An error is returned if the specified item can not be found.
+func IncrementExisting(c context.Context, key string, delta int64) (newValue uint64, err error) {
+ return incr(c, key, delta, nil)
+}
+
+func incr(c context.Context, key string, delta int64, initialValue *uint64) (newValue uint64, err error) {
+ req := &pb.MemcacheIncrementRequest{
+ Key: []byte(key),
+ InitialValue: initialValue,
+ }
+ if delta >= 0 {
+ req.Delta = proto.Uint64(uint64(delta))
+ } else {
+ req.Delta = proto.Uint64(uint64(-delta))
+ req.Direction = pb.MemcacheIncrementRequest_DECREMENT.Enum()
+ }
+ res := &pb.MemcacheIncrementResponse{}
+ err = internal.Call(c, "memcache", "Increment", req, res)
+ if err != nil {
+ return
+ }
+ if res.NewValue == nil {
+ return 0, ErrCacheMiss
+ }
+ return *res.NewValue, nil
+}
+
+// set sets the given items using the given conflict resolution policy.
+// appengine.MultiError may be returned.
+func set(c context.Context, item []*Item, value [][]byte, policy pb.MemcacheSetRequest_SetPolicy) error {
+ if len(item) == 0 {
+ return nil
+ }
+ req := &pb.MemcacheSetRequest{
+ Item: make([]*pb.MemcacheSetRequest_Item, len(item)),
+ }
+ for i, t := range item {
+ p := &pb.MemcacheSetRequest_Item{
+ Key: []byte(t.Key),
+ }
+ if value == nil {
+ p.Value = t.Value
+ } else {
+ p.Value = value[i]
+ }
+ if t.Flags != 0 {
+ p.Flags = proto.Uint32(t.Flags)
+ }
+ if t.Expiration != 0 {
+ // In the .proto file, MemcacheSetRequest_Item uses a fixed32 (i.e. unsigned)
+ // for expiration time, while MemcacheGetRequest_Item uses int32 (i.e. signed).
+ // Throughout this .go file, we use int32.
+ // Also, in the proto, the expiration value is either a duration (in seconds)
+ // or an absolute Unix timestamp (in seconds), depending on whether the
+ // value is less than or greater than or equal to 30 years, respectively.
+ if t.Expiration < time.Second {
+ // Because an Expiration of 0 means no expiration, we take
+ // care here to translate an item with an expiration
+ // Duration between 0-1 seconds as immediately expiring
+ // (saying it expired a few seconds ago), rather than
+ // rounding it down to 0 and making it live forever.
+ p.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) - 5)
+ } else if t.Expiration >= thirtyYears {
+ p.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) + uint32(t.Expiration/time.Second))
+ } else {
+ p.ExpirationTime = proto.Uint32(uint32(t.Expiration / time.Second))
+ }
+ }
+ if t.casID != 0 {
+ p.CasId = proto.Uint64(t.casID)
+ p.ForCas = proto.Bool(true)
+ }
+ p.SetPolicy = policy.Enum()
+ req.Item[i] = p
+ }
+ res := &pb.MemcacheSetResponse{}
+ if err := internal.Call(c, "memcache", "Set", req, res); err != nil {
+ return err
+ }
+ if len(res.SetStatus) != len(item) {
+ return ErrServerError
+ }
+ me, any := make(appengine.MultiError, len(item)), false
+ for i, st := range res.SetStatus {
+ var err error
+ switch st {
+ case pb.MemcacheSetResponse_STORED:
+ // OK
+ case pb.MemcacheSetResponse_NOT_STORED:
+ err = ErrNotStored
+ case pb.MemcacheSetResponse_EXISTS:
+ err = ErrCASConflict
+ default:
+ err = ErrServerError
+ }
+ if err != nil {
+ me[i] = err
+ any = true
+ }
+ }
+ if any {
+ return me
+ }
+ return nil
+}
+
+// Set writes the given item, unconditionally.
+func Set(c context.Context, item *Item) error {
+ return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_SET))
+}
+
+// SetMulti is a batch version of Set.
+// appengine.MultiError may be returned.
+func SetMulti(c context.Context, item []*Item) error {
+ return set(c, item, nil, pb.MemcacheSetRequest_SET)
+}
+
+// Add writes the given item, if no value already exists for its key.
+// ErrNotStored is returned if that condition is not met.
+func Add(c context.Context, item *Item) error {
+ return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_ADD))
+}
+
+// AddMulti is a batch version of Add.
+// appengine.MultiError may be returned.
+func AddMulti(c context.Context, item []*Item) error {
+ return set(c, item, nil, pb.MemcacheSetRequest_ADD)
+}
+
+// CompareAndSwap writes the given item that was previously returned by Get,
+// if the value was neither modified nor evicted between the Get and the
+// CompareAndSwap calls. The item's Key should not change between calls but
+// all other item fields may differ.
+// ErrCASConflict is returned if the value was modified in between the calls.
+// ErrNotStored is returned if the value was evicted in between the calls.
+func CompareAndSwap(c context.Context, item *Item) error {
+ return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_CAS))
+}
+
+// CompareAndSwapMulti is a batch version of CompareAndSwap.
+// appengine.MultiError may be returned.
+func CompareAndSwapMulti(c context.Context, item []*Item) error {
+ return set(c, item, nil, pb.MemcacheSetRequest_CAS)
+}
+
+// Codec represents a symmetric pair of functions that implement a codec.
+// Items stored into or retrieved from memcache using a Codec have their
+// values marshaled or unmarshaled.
+//
+// All the methods provided for Codec behave analogously to the package level
+// function with the same name.
+type Codec struct {
+ Marshal func(interface{}) ([]byte, error)
+ Unmarshal func([]byte, interface{}) error
+}
+
+// Get gets the item for the given key and decodes the obtained value into v.
+// ErrCacheMiss is returned for a memcache cache miss.
+// The key must be at most 250 bytes in length.
+func (cd Codec) Get(c context.Context, key string, v interface{}) (*Item, error) {
+ i, err := Get(c, key)
+ if err != nil {
+ return nil, err
+ }
+ if err := cd.Unmarshal(i.Value, v); err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+func (cd Codec) set(c context.Context, items []*Item, policy pb.MemcacheSetRequest_SetPolicy) error {
+ var vs [][]byte
+ var me appengine.MultiError
+ for i, item := range items {
+ v, err := cd.Marshal(item.Object)
+ if err != nil {
+ if me == nil {
+ me = make(appengine.MultiError, len(items))
+ }
+ me[i] = err
+ continue
+ }
+ if me == nil {
+ vs = append(vs, v)
+ }
+ }
+ if me != nil {
+ return me
+ }
+
+ return set(c, items, vs, policy)
+}
+
+// Set writes the given item, unconditionally.
+func (cd Codec) Set(c context.Context, item *Item) error {
+ return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_SET))
+}
+
+// SetMulti is a batch version of Set.
+// appengine.MultiError may be returned.
+func (cd Codec) SetMulti(c context.Context, items []*Item) error {
+ return cd.set(c, items, pb.MemcacheSetRequest_SET)
+}
+
+// Add writes the given item, if no value already exists for its key.
+// ErrNotStored is returned if that condition is not met.
+func (cd Codec) Add(c context.Context, item *Item) error {
+ return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_ADD))
+}
+
+// AddMulti is a batch version of Add.
+// appengine.MultiError may be returned.
+func (cd Codec) AddMulti(c context.Context, items []*Item) error {
+ return cd.set(c, items, pb.MemcacheSetRequest_ADD)
+}
+
+// CompareAndSwap writes the given item that was previously returned by Get,
+// if the value was neither modified nor evicted between the Get and the
+// CompareAndSwap calls. The item's Key should not change between calls but
+// all other item fields may differ.
+// ErrCASConflict is returned if the value was modified in between the calls.
+// ErrNotStored is returned if the value was evicted in between the calls.
+func (cd Codec) CompareAndSwap(c context.Context, item *Item) error {
+ return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_CAS))
+}
+
+// CompareAndSwapMulti is a batch version of CompareAndSwap.
+// appengine.MultiError may be returned.
+func (cd Codec) CompareAndSwapMulti(c context.Context, items []*Item) error {
+ return cd.set(c, items, pb.MemcacheSetRequest_CAS)
+}
+
+var (
+ // Gob is a Codec that uses the gob package.
+ Gob = Codec{gobMarshal, gobUnmarshal}
+ // JSON is a Codec that uses the json package.
+ JSON = Codec{json.Marshal, json.Unmarshal}
+)
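+
+// A sketch of round-tripping a value through the JSON codec (the Profile
+// type is illustrative); Object is marshaled on Set and unmarshaled into the
+// destination passed to Get.
+//
+//	type Profile struct{ Name string }
+//	err := JSON.Set(c, &Item{Key: "p:1", Object: &Profile{Name: "gopher"}})
+//	...
+//	var p Profile
+//	_, err = JSON.Get(c, "p:1", &p)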
+
+func gobMarshal(v interface{}) ([]byte, error) {
+ var buf bytes.Buffer
+ if err := gob.NewEncoder(&buf).Encode(v); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func gobUnmarshal(data []byte, v interface{}) error {
+ return gob.NewDecoder(bytes.NewBuffer(data)).Decode(v)
+}
+
+// Statistics represents a set of statistics about the memcache cache.
+// This may include items that have expired but have not yet been removed from the cache.
+type Statistics struct {
+ Hits uint64 // Counter of cache hits
+ Misses uint64 // Counter of cache misses
+ ByteHits uint64 // Counter of bytes transferred for gets
+
+ Items uint64 // Items currently in the cache
+ Bytes uint64 // Size of all items currently in the cache
+
+ Oldest int64 // Age of access of the oldest item, in seconds
+}
+
+// Stats retrieves the current memcache statistics.
+func Stats(c context.Context) (*Statistics, error) {
+ req := &pb.MemcacheStatsRequest{}
+ res := &pb.MemcacheStatsResponse{}
+ if err := internal.Call(c, "memcache", "Stats", req, res); err != nil {
+ return nil, err
+ }
+ if res.Stats == nil {
+ return nil, ErrNoStats
+ }
+ return &Statistics{
+ Hits: *res.Stats.Hits,
+ Misses: *res.Stats.Misses,
+ ByteHits: *res.Stats.ByteHits,
+ Items: *res.Stats.Items,
+ Bytes: *res.Stats.Bytes,
+ Oldest: int64(*res.Stats.OldestItemAge),
+ }, nil
+}
+
+// Flush flushes all items from memcache.
+func Flush(c context.Context) error {
+ req := &pb.MemcacheFlushRequest{}
+ res := &pb.MemcacheFlushResponse{}
+ return internal.Call(c, "memcache", "FlushAll", req, res)
+}
+
+func namespaceMod(m proto.Message, namespace string) {
+ switch m := m.(type) {
+ case *pb.MemcacheDeleteRequest:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ case *pb.MemcacheGetRequest:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ case *pb.MemcacheIncrementRequest:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ case *pb.MemcacheSetRequest:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ // MemcacheFlushRequest, MemcacheStatsRequest do not apply namespace.
+ }
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("memcache", pb.MemcacheServiceError_ErrorCode_name)
+ internal.NamespaceMods["memcache"] = namespaceMod
+}
diff --git a/vendor/google.golang.org/appengine/module/module.go b/vendor/google.golang.org/appengine/module/module.go
new file mode 100644
index 000000000..aaf020cc4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/module/module.go
@@ -0,0 +1,113 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package module provides functions for interacting with modules.
+
+The appengine package contains functions that report the identity of the app,
+including the module name.
+*/
+package module
+
+import (
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/modules"
+)
+
+// List returns the names of modules belonging to this application.
+func List(c context.Context) ([]string, error) {
+ req := &pb.GetModulesRequest{}
+ res := &pb.GetModulesResponse{}
+ err := internal.Call(c, "modules", "GetModules", req, res)
+ return res.Module, err
+}
+
+// NumInstances returns the number of instances of the given module/version.
+// If either argument is the empty string it means the default.
+func NumInstances(c context.Context, module, version string) (int, error) {
+ req := &pb.GetNumInstancesRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ res := &pb.GetNumInstancesResponse{}
+
+ if err := internal.Call(c, "modules", "GetNumInstances", req, res); err != nil {
+ return 0, err
+ }
+ return int(*res.Instances), nil
+}
+
+// SetNumInstances sets the number of instances of the given module.version to the
+// specified value. If either module or version are the empty string it means the
+// default.
+func SetNumInstances(c context.Context, module, version string, instances int) error {
+ req := &pb.SetNumInstancesRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ req.Instances = proto.Int64(int64(instances))
+ res := &pb.SetNumInstancesResponse{}
+ return internal.Call(c, "modules", "SetNumInstances", req, res)
+}
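+
+// A sketch of pinning a module version to a fixed instance count (the module
+// and version names are illustrative):
+//
+//	if err := SetNumInstances(c, "worker", "v2", 5); err != nil {
+//		// handle the error
+//	}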
+
+// Versions returns the names of the versions that belong to the specified module.
+// If module is the empty string, it means the default module.
+func Versions(c context.Context, module string) ([]string, error) {
+ req := &pb.GetVersionsRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ res := &pb.GetVersionsResponse{}
+ err := internal.Call(c, "modules", "GetVersions", req, res)
+ return res.GetVersion(), err
+}
+
+// DefaultVersion returns the default version of the specified module.
+// If module is the empty string, it means the default module.
+func DefaultVersion(c context.Context, module string) (string, error) {
+ req := &pb.GetDefaultVersionRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ res := &pb.GetDefaultVersionResponse{}
+ err := internal.Call(c, "modules", "GetDefaultVersion", req, res)
+ return res.GetVersion(), err
+}
+
+// Start starts the specified version of the specified module.
+// If either module or version are the empty string, it means the default.
+func Start(c context.Context, module, version string) error {
+ req := &pb.StartModuleRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ res := &pb.StartModuleResponse{}
+ return internal.Call(c, "modules", "StartModule", req, res)
+}
+
+// Stop stops the specified version of the specified module.
+// If either module or version are the empty string, it means the default.
+func Stop(c context.Context, module, version string) error {
+ req := &pb.StopModuleRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ res := &pb.StopModuleResponse{}
+ return internal.Call(c, "modules", "StopModule", req, res)
+}
diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go
new file mode 100644
index 000000000..9c00e9077
--- /dev/null
+++ b/vendor/google.golang.org/appengine/namespace.go
@@ -0,0 +1,44 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+ "fmt"
+ "regexp"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// Namespace returns a replacement context that operates within the given namespace.
+func Namespace(c context.Context, namespace string) (context.Context, error) {
+ if !validNamespace.MatchString(namespace) {
+ return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace)
+ }
+ n := &namespacedContext{
+ ctx: c,
+ namespace: namespace,
+ }
+ return internal.WithNamespace(internal.WithCallOverride(c, n.call), namespace), nil
+}
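+
+// A sketch of scoping subsequent API calls to a namespace (the namespace and
+// key names are illustrative); services that register a namespace mod, such
+// as memcache, read it from the returned context.
+//
+//	nsCtx, err := Namespace(c, "tenant-a")
+//	if err != nil {
+//		return err
+//	}
+//	item, err := memcache.Get(nsCtx, "greeting")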
+
+// validNamespace matches valid namespace names.
+var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`)
+
+// namespacedContext wraps a Context to support namespaces.
+type namespacedContext struct {
+ ctx context.Context
+ namespace string
+}
+
+func (n *namespacedContext) call(_ context.Context, service, method string, in, out proto.Message) error {
+ // Apply any namespace mods.
+ if mod, ok := internal.NamespaceMods[service]; ok {
+ mod(in, n.namespace)
+ }
+ return internal.Call(n.ctx, service, method, in, out)
+}
diff --git a/vendor/google.golang.org/appengine/remote_api/client.go b/vendor/google.golang.org/appengine/remote_api/client.go
new file mode 100644
index 000000000..dbe219dbb
--- /dev/null
+++ b/vendor/google.golang.org/appengine/remote_api/client.go
@@ -0,0 +1,174 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package remote_api
+
+// This file provides the client for connecting remotely to a user's production
+// application.
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "math/rand"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/remote_api"
+)
+
+// NewRemoteContext returns a context that gives access to the production
+// APIs for the application at the given host. All communication will be
+// performed over SSL unless the host is localhost.
+func NewRemoteContext(host string, client *http.Client) (context.Context, error) {
+ // Add an appcfg header to outgoing requests.
+ t := client.Transport
+ if t == nil {
+ t = http.DefaultTransport
+ }
+ client.Transport = &headerAddingRoundTripper{t}
+
+ url := url.URL{
+ Scheme: "https",
+ Host: host,
+ Path: "/_ah/remote_api",
+ }
+ if host == "localhost" || strings.HasPrefix(host, "localhost:") {
+ url.Scheme = "http"
+ }
+ u := url.String()
+ appID, err := getAppID(client, u)
+ if err != nil {
+ return nil, fmt.Errorf("unable to contact server: %v", err)
+ }
+ rc := &remoteContext{
+ client: client,
+ url: u,
+ }
+ ctx := internal.WithCallOverride(context.Background(), rc.call)
+ ctx = internal.WithLogOverride(ctx, rc.logf)
+ ctx = internal.WithAppIDOverride(ctx, appID)
+ return ctx, nil
+}
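+
+// A sketch of driving production APIs from an offline tool; the http.Client
+// is assumed to already carry credentials that the /_ah/remote_api endpoint
+// accepts, and the host name is illustrative.
+//
+//	ctx, err := NewRemoteContext("myapp.appspot.com", authedClient)
+//	if err != nil {
+//		return err
+//	}
+//	item, err := memcache.Get(ctx, "key")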
+
+type remoteContext struct {
+ client *http.Client
+ url string
+}
+
+var logLevels = map[int64]string{
+ 0: "DEBUG",
+ 1: "INFO",
+ 2: "WARNING",
+ 3: "ERROR",
+ 4: "CRITICAL",
+}
+
+func (c *remoteContext) logf(level int64, format string, args ...interface{}) {
+ log.Printf(logLevels[level]+": "+format, args...)
+}
+
+func (c *remoteContext) call(ctx context.Context, service, method string, in, out proto.Message) error {
+ req, err := proto.Marshal(in)
+ if err != nil {
+ return fmt.Errorf("error marshalling request: %v", err)
+ }
+
+ remReq := &pb.Request{
+ ServiceName: proto.String(service),
+ Method: proto.String(method),
+ Request: req,
+ // NOTE(djd): RequestId is unused in the server.
+ }
+
+ req, err = proto.Marshal(remReq)
+ if err != nil {
+ return fmt.Errorf("proto.Marshal: %v", err)
+ }
+
+ // TODO(djd): Respect ctx.Deadline()?
+ resp, err := c.client.Post(c.url, "application/octet-stream", bytes.NewReader(req))
+ if err != nil {
+ return fmt.Errorf("error sending request: %v", err)
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("bad response %d; body: %q", resp.StatusCode, body)
+ }
+ if err != nil {
+ return fmt.Errorf("failed reading response: %v", err)
+ }
+ remResp := &pb.Response{}
+ if err := proto.Unmarshal(body, remResp); err != nil {
+ return fmt.Errorf("error unmarshalling response: %v", err)
+ }
+
+ if ae := remResp.GetApplicationError(); ae != nil {
+ return &internal.APIError{
+ Code: ae.GetCode(),
+ Detail: ae.GetDetail(),
+ Service: service,
+ }
+ }
+
+ if remResp.Response == nil {
+ return fmt.Errorf("unexpected response: %s", proto.MarshalTextString(remResp))
+ }
+
+ return proto.Unmarshal(remResp.Response, out)
+}
+
+// This is a forgiving regexp designed to parse the app ID from YAML.
+var appIDRE = regexp.MustCompile(`app_id["']?\s*:\s*['"]?([-a-z0-9.:~]+)`)
+
+func getAppID(client *http.Client, url string) (string, error) {
+ // Generate a pseudo-random token for handshaking.
+ token := strconv.Itoa(rand.New(rand.NewSource(time.Now().UnixNano())).Int())
+
+ resp, err := client.Get(fmt.Sprintf("%s?rtok=%s", url, token))
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if resp.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("bad response %d; body: %q", resp.StatusCode, body)
+ }
+ if err != nil {
+ return "", fmt.Errorf("failed reading response: %v", err)
+ }
+
+ // Check the token is present in response.
+ if !bytes.Contains(body, []byte(token)) {
+ return "", fmt.Errorf("token not found: want %q; body %q", token, body)
+ }
+
+ match := appIDRE.FindSubmatch(body)
+ if match == nil {
+ return "", fmt.Errorf("app ID not found: body %q", body)
+ }
+
+ return string(match[1]), nil
+}
+
+type headerAddingRoundTripper struct {
+ Wrapped http.RoundTripper
+}
+
+func (t *headerAddingRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
+ r.Header.Set("X-Appcfg-Api-Version", "1")
+ return t.Wrapped.RoundTrip(r)
+}
diff --git a/vendor/google.golang.org/appengine/remote_api/remote_api.go b/vendor/google.golang.org/appengine/remote_api/remote_api.go
new file mode 100644
index 000000000..e46157ef4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/remote_api/remote_api.go
@@ -0,0 +1,149 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package remote_api implements the /_ah/remote_api endpoint.
+This endpoint is used by offline tools such as the bulk loader.
+*/
+package remote_api
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strconv"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/remote_api"
+ "google.golang.org/appengine/log"
+ "google.golang.org/appengine/user"
+)
+
+func init() {
+ http.HandleFunc("/_ah/remote_api", handle)
+}
+
+func handle(w http.ResponseWriter, req *http.Request) {
+ c := appengine.NewContext(req)
+
+ u := user.Current(c)
+ if u == nil {
+ u, _ = user.CurrentOAuth(c, "")
+ }
+
+ if u == nil || !u.Admin {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.WriteHeader(http.StatusUnauthorized)
+ io.WriteString(w, "You must be logged in as an administrator to access this.\n")
+ return
+ }
+ if req.Header.Get("X-Appcfg-Api-Version") == "" {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.WriteHeader(http.StatusForbidden)
+ io.WriteString(w, "This request did not contain a necessary header.\n")
+ return
+ }
+
+ if req.Method != "POST" {
+ // Response must be YAML.
+ rtok := req.FormValue("rtok")
+ if rtok == "" {
+ rtok = "0"
+ }
+ w.Header().Set("Content-Type", "text/yaml; charset=utf-8")
+ fmt.Fprintf(w, `{app_id: %q, rtok: %q}`, internal.FullyQualifiedAppID(c), rtok)
+ return
+ }
+
+ defer req.Body.Close()
+ body, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ w.WriteHeader(http.StatusBadRequest)
+ log.Errorf(c, "Failed reading body: %v", err)
+ return
+ }
+ remReq := &pb.Request{}
+ if err := proto.Unmarshal(body, remReq); err != nil {
+ w.WriteHeader(http.StatusBadRequest)
+ log.Errorf(c, "Bad body: %v", err)
+ return
+ }
+
+ service, method := *remReq.ServiceName, *remReq.Method
+ if !requestSupported(service, method) {
+ w.WriteHeader(http.StatusBadRequest)
+ log.Errorf(c, "Unsupported RPC /%s.%s", service, method)
+ return
+ }
+
+ rawReq := &rawMessage{remReq.Request}
+ rawRes := &rawMessage{}
+ err = internal.Call(c, service, method, rawReq, rawRes)
+
+ remRes := &pb.Response{}
+ if err == nil {
+ remRes.Response = rawRes.buf
+ } else if ae, ok := err.(*internal.APIError); ok {
+ remRes.ApplicationError = &pb.ApplicationError{
+ Code: &ae.Code,
+ Detail: &ae.Detail,
+ }
+ } else {
+ // This shouldn't normally happen.
+ log.Errorf(c, "appengine/remote_api: Unexpected error of type %T: %v", err, err)
+ remRes.ApplicationError = &pb.ApplicationError{
+ Code: proto.Int32(0),
+ Detail: proto.String(err.Error()),
+ }
+ }
+ out, err := proto.Marshal(remRes)
+ if err != nil {
+ // This should not be possible.
+ w.WriteHeader(500)
+ log.Errorf(c, "proto.Marshal: %v", err)
+ return
+ }
+
+ log.Infof(c, "Spooling %d bytes of response to /%s.%s", len(out), service, method)
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Length", strconv.Itoa(len(out)))
+ w.Write(out)
+}
+
+// rawMessage is a protocol buffer type that is already serialised.
+// This allows the remote_api code here to handle messages
+// without having to know the real type.
+type rawMessage struct {
+ buf []byte
+}
+
+func (rm *rawMessage) Marshal() ([]byte, error) {
+ return rm.buf, nil
+}
+
+func (rm *rawMessage) Unmarshal(buf []byte) error {
+ rm.buf = make([]byte, len(buf))
+ copy(rm.buf, buf)
+ return nil
+}
+
+func requestSupported(service, method string) bool {
+ // This list of supported services is taken from SERVICE_PB_MAP in remote_api_services.py
+ switch service {
+ case "app_identity_service", "blobstore", "capability_service", "channel", "datastore_v3",
+ "datastore_v4", "file", "images", "logservice", "mail", "matcher", "memcache", "remote_datastore",
+ "remote_socket", "search", "modules", "system", "taskqueue", "urlfetch", "user", "xmpp":
+ return true
+ }
+ return false
+}
+
+// Methods to satisfy proto.Message.
+func (rm *rawMessage) Reset() { rm.buf = nil }
+func (rm *rawMessage) String() string { return strconv.Quote(string(rm.buf)) }
+func (*rawMessage) ProtoMessage() {}
diff --git a/vendor/google.golang.org/appengine/search/field.go b/vendor/google.golang.org/appengine/search/field.go
new file mode 100644
index 000000000..b4c31c0c9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/field.go
@@ -0,0 +1,144 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Field is a name/value pair. A search index's document can be loaded and
+// saved as a sequence of Fields.
+type Field struct {
+ // Name is the field name.
+ Name string
+ // Value is the field value. The valid types are:
+ // - string,
+ // - search.Atom,
+ // - search.HTML,
+ // - time.Time (stored with millisecond precision),
+ // - float64,
+ // - GeoPoint.
+ Value interface{}
+	// Language is a two-letter ISO 639-1 code for the field's language,
+ // defaulting to "en" if nothing is specified. It may only be specified for
+ // fields of type string and search.HTML.
+ Language string
+ // Derived marks fields that were calculated as a result of a
+ // FieldExpression provided to Search. This field is ignored when saving a
+ // document.
+ Derived bool
+}
+
+// DocumentMetadata is a struct containing information describing a given document.
+type DocumentMetadata struct {
+ // Rank is an integer specifying the order the document will be returned in
+ // search results. If zero, the rank will be set to the number of seconds since
+ // 2011-01-01 00:00:00 UTC when being Put into an index.
+ Rank int
+}
+
+// FieldLoadSaver can be converted from and to a slice of Fields
+// with additional document metadata.
+type FieldLoadSaver interface {
+ Load([]Field, *DocumentMetadata) error
+ Save() ([]Field, *DocumentMetadata, error)
+}
+
+// FieldList converts a []Field to implement FieldLoadSaver.
+type FieldList []Field
+
+// Load loads all of the provided fields into l.
+// It does not first reset *l to an empty slice.
+func (l *FieldList) Load(f []Field, _ *DocumentMetadata) error {
+ *l = append(*l, f...)
+ return nil
+}
+
+// Save returns all of l's fields as a slice of Fields.
+func (l *FieldList) Save() ([]Field, *DocumentMetadata, error) {
+ return *l, nil, nil
+}
+
+var _ FieldLoadSaver = (*FieldList)(nil)
+
+// structFLS adapts a struct to be a FieldLoadSaver.
+type structFLS struct {
+ reflect.Value
+}
+
+func (s structFLS) Load(fields []Field, _ *DocumentMetadata) (err error) {
+ for _, field := range fields {
+ f := s.FieldByName(field.Name)
+ if !f.IsValid() {
+ err = &ErrFieldMismatch{
+ FieldName: field.Name,
+ Reason: "no such struct field",
+ }
+ continue
+ }
+ if !f.CanSet() {
+ err = &ErrFieldMismatch{
+ FieldName: field.Name,
+ Reason: "cannot set struct field",
+ }
+ continue
+ }
+ v := reflect.ValueOf(field.Value)
+ if ft, vt := f.Type(), v.Type(); ft != vt {
+ err = &ErrFieldMismatch{
+ FieldName: field.Name,
+ Reason: fmt.Sprintf("type mismatch: %v for %v data", ft, vt),
+ }
+ continue
+ }
+ f.Set(v)
+ }
+ return err
+}
+
+func (s structFLS) Save() ([]Field, *DocumentMetadata, error) {
+ fields := make([]Field, 0, s.NumField())
+ for i := 0; i < s.NumField(); i++ {
+ f := s.Field(i)
+ if !f.CanSet() {
+ continue
+ }
+ fields = append(fields, Field{
+ Name: s.Type().Field(i).Name,
+ Value: f.Interface(),
+ })
+ }
+ return fields, nil, nil
+}
+
+// newStructFLS returns a FieldLoadSaver for the struct pointer p.
+func newStructFLS(p interface{}) (FieldLoadSaver, error) {
+ v := reflect.ValueOf(p)
+ if v.Kind() != reflect.Ptr || v.IsNil() || v.Elem().Kind() != reflect.Struct {
+ return nil, ErrInvalidDocumentType
+ }
+ return structFLS{v.Elem()}, nil
+}
+
+// LoadStruct loads the fields from f to dst. dst must be a struct pointer.
+func LoadStruct(dst interface{}, f []Field) error {
+ x, err := newStructFLS(dst)
+ if err != nil {
+ return err
+ }
+ return x.Load(f, nil)
+}
+
+// SaveStruct returns the fields from src as a slice of Field.
+// src must be a struct pointer.
+func SaveStruct(src interface{}) ([]Field, error) {
+ x, err := newStructFLS(src)
+ if err != nil {
+ return nil, err
+ }
+ fs, _, err := x.Save()
+ return fs, err
+}
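+
+// As a sketch of how the helpers above fit together (the Doc type is assumed
+// for this example), SaveStruct flattens a struct into fields and LoadStruct
+// reverses the conversion:
+//
+//	type Doc struct {
+//		Author string
+//		Rating float64
+//	}
+//
+//	fields, err := search.SaveStruct(&Doc{Author: "gopher", Rating: 4})
+//	if err != nil {
+//		return err
+//	}
+//	var dst Doc
+//	if err := search.LoadStruct(&dst, fields); err != nil {
+//		return err
+//	}
+//	// dst now mirrors the original struct.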
diff --git a/vendor/google.golang.org/appengine/search/search.go b/vendor/google.golang.org/appengine/search/search.go
new file mode 100644
index 000000000..92b7a5bc3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/search.go
@@ -0,0 +1,854 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package search provides a client for App Engine's search service.
+
+Indexes contain documents, and a document's contents are a mapping from case-
+sensitive field names to values. In Go, documents are represented by struct
+pointers, and the valid types for a struct's fields are:
+ - string,
+ - search.Atom,
+ - search.HTML,
+ - time.Time (stored with millisecond precision),
+ - float64 (value between -2,147,483,647 and 2,147,483,647 inclusive),
+ - appengine.GeoPoint.
+
+Documents can also be represented by any type implementing the FieldLoadSaver
+interface.
+
+Example code:
+
+ type Doc struct {
+ Author string
+ Comment string
+ Creation time.Time
+ }
+
+ index, err := search.Open("comments")
+ if err != nil {
+ return err
+ }
+ newID, err := index.Put(c, "", &Doc{
+ Author: "gopher",
+ Comment: "the truth of the matter",
+ Creation: time.Now(),
+ })
+ if err != nil {
+ return err
+ }
+
+Searching an index for a query will result in an iterator. As with an iterator
+from package datastore, pass a destination struct to Next to decode the next
+result. Next will return Done when the iterator is exhausted.
+
+ for t := index.Search(c, "Comment:truth", nil); ; {
+ var doc Doc
+ id, err := t.Next(&doc)
+ if err == search.Done {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(w, "%s -> %#v\n", id, doc)
+ }
+
+Call List to iterate over documents.
+
+ for t := index.List(c, nil); ; {
+ var doc Doc
+ id, err := t.Next(&doc)
+ if err == search.Done {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(w, "%s -> %#v\n", id, doc)
+ }
+
+A single document can also be retrieved by its ID. Pass a destination struct
+to Get to hold the resulting document.
+
+ var doc Doc
+ err := index.Get(c, id, &doc)
+ if err != nil {
+ return err
+ }
+
+Queries are expressed as strings, plus some optional parameters. The query
+language is described at
+https://cloud.google.com/appengine/docs/go/search/query_strings
+
+Note that in Go, field names come from the struct field definition and begin
+with an upper case letter.
+*/
+package search
+
+// TODO: let Put specify the document language: "en", "fr", etc. Also: order_id?? storage??
+// TODO: Index.GetAll (or Iterator.GetAll)?
+// TODO: struct <-> protobuf tests.
+// TODO: enforce Python's MIN_NUMBER_VALUE and MIN_DATE (which would disallow a zero
+// time.Time)? _MAXIMUM_STRING_LENGTH?
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/search"
+)
+
+var (
+ // ErrInvalidDocumentType is returned when methods like Put, Get or Next
+ // are passed a dst or src argument of invalid type.
+ ErrInvalidDocumentType = errors.New("search: invalid document type")
+
+ // ErrNoSuchDocument is returned when no document was found for a given ID.
+ ErrNoSuchDocument = errors.New("search: no such document")
+)
+
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or unexported in
+// the destination struct.
+type ErrFieldMismatch struct {
+ FieldName string
+ Reason string
+}
+
+func (e *ErrFieldMismatch) Error() string {
+ return fmt.Sprintf("search: cannot load field %q: %s", e.FieldName, e.Reason)
+}
+
+// Atom is a document field whose contents are indexed as a single indivisible
+// string.
+type Atom string
+
+// HTML is a document field whose contents are indexed as HTML. Only text nodes
+// are indexed: "foobar" will be treated as "foobar".
+type HTML string
+
+// validIndexNameOrDocID is the Go equivalent of Python's
+// _ValidateVisiblePrintableAsciiNotReserved.
+func validIndexNameOrDocID(s string) bool {
+ if strings.HasPrefix(s, "!") {
+ return false
+ }
+ for _, c := range s {
+ if c < 0x21 || 0x7f <= c {
+ return false
+ }
+ }
+ return true
+}
+
+var (
+ fieldNameRE = regexp.MustCompile(`^[A-Z][A-Za-z0-9_]*$`)
+ languageRE = regexp.MustCompile(`^[a-z]{2}$`)
+)
+
+// validFieldName is the Go equivalent of Python's _CheckFieldName.
+func validFieldName(s string) bool {
+ return len(s) <= 500 && fieldNameRE.MatchString(s)
+}
+
+// validDocRank checks that the rank is in the range [0, 2^31).
+func validDocRank(r int) bool {
+ return 0 <= r && r <= (1<<31-1)
+}
+
+// validLanguage checks that a language looks like ISO 639-1.
+func validLanguage(s string) bool {
+ return languageRE.MatchString(s)
+}
+
+// validFloat checks that f is in the range [-2147483647, 2147483647].
+func validFloat(f float64) bool {
+ return -(1<<31-1) <= f && f <= (1<<31-1)
+}
+
+// Index is an index of documents.
+type Index struct {
+ spec pb.IndexSpec
+}
+
+// orderIDEpoch forms the basis for populating OrderId on documents.
+var orderIDEpoch = time.Date(2011, 1, 1, 0, 0, 0, 0, time.UTC)
+
+// Open opens the index with the given name. The index is created if it does
+// not already exist.
+//
+// The name is a human-readable ASCII string. It must contain no whitespace
+// characters and not start with "!".
+func Open(name string) (*Index, error) {
+ if !validIndexNameOrDocID(name) {
+ return nil, fmt.Errorf("search: invalid index name %q", name)
+ }
+ return &Index{
+ spec: pb.IndexSpec{
+ Name: &name,
+ },
+ }, nil
+}
+
+// Put saves src to the index. If id is empty, a new ID is allocated by the
+// service and returned. If id is not empty, any existing index entry for that
+// ID is replaced.
+//
+// The ID is a human-readable ASCII string. It must contain no whitespace
+// characters and not start with "!".
+//
+// src must be a non-nil struct pointer or implement the FieldLoadSaver
+// interface.
+func (x *Index) Put(c context.Context, id string, src interface{}) (string, error) {
+ fields, meta, err := saveDoc(src)
+ if err != nil {
+ return "", err
+ }
+ d := &pb.Document{
+ Field: fields,
+ OrderId: proto.Int32(int32(time.Since(orderIDEpoch).Seconds())),
+ }
+ if meta != nil {
+ if meta.Rank != 0 {
+ if !validDocRank(meta.Rank) {
+ return "", fmt.Errorf("search: invalid rank %d, must be [0, 2^31)", meta.Rank)
+ }
+ *d.OrderId = int32(meta.Rank)
+ }
+ }
+ if id != "" {
+ if !validIndexNameOrDocID(id) {
+ return "", fmt.Errorf("search: invalid ID %q", id)
+ }
+ d.Id = proto.String(id)
+ }
+ req := &pb.IndexDocumentRequest{
+ Params: &pb.IndexDocumentParams{
+ Document: []*pb.Document{d},
+ IndexSpec: &x.spec,
+ },
+ }
+ res := &pb.IndexDocumentResponse{}
+ if err := internal.Call(c, "search", "IndexDocument", req, res); err != nil {
+ return "", err
+ }
+ if len(res.Status) > 0 {
+ if s := res.Status[0]; s.GetCode() != pb.SearchServiceError_OK {
+ return "", fmt.Errorf("search: %s: %s", s.GetCode(), s.GetErrorDetail())
+ }
+ }
+ if len(res.Status) != 1 || len(res.DocId) != 1 {
+ return "", fmt.Errorf("search: internal error: wrong number of results (%d Statuses, %d DocIDs)",
+ len(res.Status), len(res.DocId))
+ }
+ return res.DocId[0], nil
+}
+
+// Get loads the document with the given ID into dst.
+//
+// The ID is a human-readable ASCII string. It must be non-empty, contain no
+// whitespace characters and not start with "!".
+//
+// dst must be a non-nil struct pointer or implement the FieldLoadSaver
+// interface.
+//
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. ErrFieldMismatch is only returned if
+// dst is a struct pointer. It is up to the caller to decide whether this error
+// is fatal, recoverable, or ignorable.
+func (x *Index) Get(c context.Context, id string, dst interface{}) error {
+ if id == "" || !validIndexNameOrDocID(id) {
+ return fmt.Errorf("search: invalid ID %q", id)
+ }
+ req := &pb.ListDocumentsRequest{
+ Params: &pb.ListDocumentsParams{
+ IndexSpec: &x.spec,
+ StartDocId: proto.String(id),
+ Limit: proto.Int32(1),
+ },
+ }
+ res := &pb.ListDocumentsResponse{}
+ if err := internal.Call(c, "search", "ListDocuments", req, res); err != nil {
+ return err
+ }
+ if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {
+ return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail())
+ }
+ if len(res.Document) != 1 || res.Document[0].GetId() != id {
+ return ErrNoSuchDocument
+ }
+ metadata := &DocumentMetadata{
+ Rank: int(res.Document[0].GetOrderId()),
+ }
+ return loadDoc(dst, res.Document[0].Field, nil, metadata)
+}
+
+// Delete deletes a document from the index.
+func (x *Index) Delete(c context.Context, id string) error {
+ req := &pb.DeleteDocumentRequest{
+ Params: &pb.DeleteDocumentParams{
+ DocId: []string{id},
+ IndexSpec: &x.spec,
+ },
+ }
+ res := &pb.DeleteDocumentResponse{}
+ if err := internal.Call(c, "search", "DeleteDocument", req, res); err != nil {
+ return err
+ }
+ if len(res.Status) != 1 {
+ return fmt.Errorf("search: internal error: wrong number of results (%d)", len(res.Status))
+ }
+ if s := res.Status[0]; s.GetCode() != pb.SearchServiceError_OK {
+ return fmt.Errorf("search: %s: %s", s.GetCode(), s.GetErrorDetail())
+ }
+ return nil
+}
+
+// List lists all of the documents in an index. The documents are returned in
+// increasing ID order.
+func (x *Index) List(c context.Context, opts *ListOptions) *Iterator {
+ t := &Iterator{
+ c: c,
+ index: x,
+ count: -1,
+ listInclusive: true,
+ more: moreList,
+ limit: -1,
+ }
+ if opts != nil {
+ t.listStartID = opts.StartID
+ if opts.Limit > 0 {
+ t.limit = opts.Limit
+ }
+ t.idsOnly = opts.IDsOnly
+ }
+ return t
+}
+
+func moreList(t *Iterator) error {
+ req := &pb.ListDocumentsRequest{
+ Params: &pb.ListDocumentsParams{
+ IndexSpec: &t.index.spec,
+ },
+ }
+ if t.listStartID != "" {
+ req.Params.StartDocId = &t.listStartID
+ req.Params.IncludeStartDoc = &t.listInclusive
+ }
+ if t.limit > 0 {
+ req.Params.Limit = proto.Int32(int32(t.limit))
+ }
+ if t.idsOnly {
+ req.Params.KeysOnly = &t.idsOnly
+ }
+
+ res := &pb.ListDocumentsResponse{}
+ if err := internal.Call(t.c, "search", "ListDocuments", req, res); err != nil {
+ return err
+ }
+ if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {
+ return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail())
+ }
+ t.listRes = res.Document
+ t.listStartID, t.listInclusive, t.more = "", false, nil
+ if len(res.Document) != 0 {
+ if id := res.Document[len(res.Document)-1].GetId(); id != "" {
+ t.listStartID, t.more = id, moreList
+ }
+ }
+ return nil
+}
+
+// ListOptions are the options for listing documents in an index. Passing a nil
+// *ListOptions is equivalent to using the default values.
+type ListOptions struct {
+ // StartID is the inclusive lower bound for the ID of the returned
+ // documents. The zero value means all documents will be returned.
+ StartID string
+
+ // Limit is the maximum number of documents to return. The zero value
+ // indicates no limit.
+ Limit int
+
+ // IDsOnly indicates that only document IDs should be returned for the list
+ // operation; no document fields are populated.
+ IDsOnly bool
+}
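+
+// A usage sketch for List with non-default options; the Doc type, index,
+// context c and writer w follow the package-level example, and the StartID
+// value is illustrative:
+//
+//	it := index.List(c, &search.ListOptions{StartID: "doc-100", Limit: 20})
+//	for {
+//		var doc Doc
+//		id, err := it.Next(&doc)
+//		if err == search.Done {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Fprintf(w, "%s -> %#v\n", id, doc)
+//	}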
+
+// Search searches the index for the given query.
+func (x *Index) Search(c context.Context, query string, opts *SearchOptions) *Iterator {
+ t := &Iterator{
+ c: c,
+ index: x,
+ searchQuery: query,
+ more: moreSearch,
+ limit: -1,
+ }
+ if opts != nil {
+ if opts.Limit > 0 {
+ t.limit = opts.Limit
+ }
+ t.fields = opts.Fields
+ t.idsOnly = opts.IDsOnly
+ t.sort = opts.Sort
+ t.exprs = opts.Expressions
+ }
+ return t
+}
+
+func moreSearch(t *Iterator) error {
+ req := &pb.SearchRequest{
+ Params: &pb.SearchParams{
+ IndexSpec: &t.index.spec,
+ Query: &t.searchQuery,
+ CursorType: pb.SearchParams_SINGLE.Enum(),
+ FieldSpec: &pb.FieldSpec{
+ Name: t.fields,
+ },
+ },
+ }
+ if t.limit > 0 {
+ req.Params.Limit = proto.Int32(int32(t.limit))
+ }
+ if t.idsOnly {
+ req.Params.KeysOnly = &t.idsOnly
+ }
+ if t.sort != nil {
+ if err := sortToProto(t.sort, req.Params); err != nil {
+ return err
+ }
+ }
+ for _, e := range t.exprs {
+ req.Params.FieldSpec.Expression = append(req.Params.FieldSpec.Expression, &pb.FieldSpec_Expression{
+ Name: proto.String(e.Name),
+ Expression: proto.String(e.Expr),
+ })
+ }
+
+ if t.searchCursor != nil {
+ req.Params.Cursor = t.searchCursor
+ }
+ res := &pb.SearchResponse{}
+ if err := internal.Call(t.c, "search", "Search", req, res); err != nil {
+ return err
+ }
+ if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {
+ return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail())
+ }
+ t.searchRes = res.Result
+ t.count = int(*res.MatchedCount)
+ if res.Cursor != nil {
+ t.searchCursor, t.more = res.Cursor, moreSearch
+ } else {
+ t.searchCursor, t.more = nil, nil
+ }
+ return nil
+}
+
+// SearchOptions are the options for searching an index. Passing a nil
+// *SearchOptions is equivalent to using the default values.
+type SearchOptions struct {
+ // Limit is the maximum number of documents to return. The zero value
+ // indicates no limit.
+ Limit int
+
+ // IDsOnly indicates that only document IDs should be returned for the search
+ // operation; no document fields are populated.
+ IDsOnly bool
+
+ // Sort controls the ordering of search results.
+ Sort *SortOptions
+
+ // Fields specifies which document fields to include in the results. If omitted,
+ // all document fields are returned. No more than 100 fields may be specified.
+ Fields []string
+
+ // Expressions specifies additional computed fields to add to each returned
+ // document.
+ Expressions []FieldExpression
+
+ // TODO: cursor, offset, maybe others.
+}
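+
+// A sketch of a search that caps the number of results and restricts which
+// stored fields are returned; Doc, index, c and w follow the package-level
+// example and the query string is illustrative:
+//
+//	opts := &search.SearchOptions{
+//		Limit:  20,
+//		Fields: []string{"Author", "Comment"},
+//	}
+//	for t := index.Search(c, "Comment:truth", opts); ; {
+//		var doc Doc
+//		id, err := t.Next(&doc)
+//		if err == search.Done {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Fprintf(w, "%s -> %#v\n", id, doc)
+//	}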
+
+// FieldExpression defines a custom expression to evaluate for each result.
+type FieldExpression struct {
+ // Name is the name to use for the computed field.
+ Name string
+
+ // Expr is evaluated to provide a custom content snippet for each document.
+ // See https://cloud.google.com/appengine/docs/go/search/options for
+ // the supported expression syntax.
+ Expr string
+}
+
+// SortOptions control the ordering and scoring of search results.
+type SortOptions struct {
+ // Expressions is a slice of expressions representing a multi-dimensional
+ // sort.
+ Expressions []SortExpression
+
+ // Scorer, when specified, will cause the documents to be scored according to
+ // search term frequency.
+ Scorer Scorer
+
+ // Limit is the maximum number of objects to score and/or sort. Limit cannot
+ // be more than 10,000. The zero value indicates a default limit.
+ Limit int
+}
+
+// SortExpression defines a single dimension for sorting a document.
+type SortExpression struct {
+ // Expr is evaluated to provide a sorting value for each document.
+ // See https://cloud.google.com/appengine/docs/go/search/options for
+ // the supported expression syntax.
+ Expr string
+
+ // Reverse causes the documents to be sorted in ascending order.
+ Reverse bool
+
+ // The default value to use when no field is present or the expression
+ // cannot be calculated for a document. For text sorts, Default must
+ // be of type string; for numeric sorts, float64.
+ Default interface{}
+}
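+
+// For example (a sketch reusing index and c from the package-level example;
+// "Creation" is the time field of that example's Doc type), results can be
+// ordered newest-first before scoring. Sorts are descending by default; set
+// Reverse for ascending order:
+//
+//	opts := &search.SearchOptions{
+//		Sort: &search.SortOptions{
+//			Expressions: []search.SortExpression{
+//				{Expr: "Creation"},
+//			},
+//		},
+//	}
+//	it := index.Search(c, "Comment:truth", opts)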
+
+// A Scorer defines how a document is scored.
+type Scorer interface {
+ toProto(*pb.ScorerSpec)
+}
+
+type enumScorer struct {
+ enum pb.ScorerSpec_Scorer
+}
+
+func (e enumScorer) toProto(spec *pb.ScorerSpec) {
+ spec.Scorer = e.enum.Enum()
+}
+
+var (
+ // MatchScorer assigns a score based on term frequency in a document.
+ MatchScorer Scorer = enumScorer{pb.ScorerSpec_MATCH_SCORER}
+
+ // RescoringMatchScorer assigns a score based on the quality of the query
+ // match. It is similar to a MatchScorer but uses a more complex scoring
+ // algorithm based on match term frequency and other factors like field type.
+ // Please be aware that this algorithm is continually refined and can change
+ // over time without notice. This means that the ordering of search results
+ // that use this scorer can also change without notice.
+ RescoringMatchScorer Scorer = enumScorer{pb.ScorerSpec_RESCORING_MATCH_SCORER}
+)
+
+func sortToProto(sort *SortOptions, params *pb.SearchParams) error {
+ for _, e := range sort.Expressions {
+ spec := &pb.SortSpec{
+ SortExpression: proto.String(e.Expr),
+ }
+ if e.Reverse {
+ spec.SortDescending = proto.Bool(false)
+ }
+ if e.Default != nil {
+ switch d := e.Default.(type) {
+ case float64:
+ spec.DefaultValueNumeric = &d
+ case string:
+ spec.DefaultValueText = &d
+ default:
+ return fmt.Errorf("search: invalid Default type %T for expression %q", d, e.Expr)
+ }
+ }
+ params.SortSpec = append(params.SortSpec, spec)
+ }
+
+ spec := &pb.ScorerSpec{}
+ if sort.Limit > 0 {
+ spec.Limit = proto.Int32(int32(sort.Limit))
+ params.ScorerSpec = spec
+ }
+ if sort.Scorer != nil {
+ sort.Scorer.toProto(spec)
+ params.ScorerSpec = spec
+ }
+
+ return nil
+}
+
+// Iterator is the result of searching an index for a query or listing an
+// index.
+type Iterator struct {
+ c context.Context
+ index *Index
+ err error
+
+ listRes []*pb.Document
+ listStartID string
+ listInclusive bool
+
+ searchRes []*pb.SearchResult
+ searchQuery string
+ searchCursor *string
+ sort *SortOptions
+
+ fields []string
+ exprs []FieldExpression
+
+ more func(*Iterator) error
+
+ count int
+ limit int // items left to return; -1 for unlimited.
+ idsOnly bool
+}
+
+// Done is returned when a query iteration has completed.
+var Done = errors.New("search: query has no more results")
+
+// Count returns an approximation of the number of documents matched by the
+// query. It is only valid to call for iterators returned by Search.
+func (t *Iterator) Count() int { return t.count }
+
+// Next returns the ID of the next result. When there are no more results,
+// Done is returned as the error.
+//
+// dst must be a non-nil struct pointer, implement the FieldLoadSaver
+// interface, or be a nil interface value. If a non-nil dst is provided, it
+// will be filled with the indexed fields. dst is ignored if this iterator was
+// created with an IDsOnly option.
+func (t *Iterator) Next(dst interface{}) (string, error) {
+ if t.err == nil && len(t.listRes)+len(t.searchRes) == 0 && t.more != nil {
+ t.err = t.more(t)
+ }
+ if t.err != nil {
+ return "", t.err
+ }
+
+ var doc *pb.Document
+ var exprs []*pb.Field
+ switch {
+ case len(t.listRes) != 0:
+ doc = t.listRes[0]
+ t.listRes = t.listRes[1:]
+ case len(t.searchRes) != 0:
+ doc = t.searchRes[0].Document
+ exprs = t.searchRes[0].Expression
+ t.searchRes = t.searchRes[1:]
+ default:
+ return "", Done
+ }
+ if doc == nil {
+ return "", errors.New("search: internal error: no document returned")
+ }
+ if !t.idsOnly && dst != nil {
+ metadata := &DocumentMetadata{
+ Rank: int(doc.GetOrderId()),
+ }
+ if err := loadDoc(dst, doc.Field, exprs, metadata); err != nil {
+ return "", err
+ }
+ }
+ if t.limit > 0 {
+ t.limit--
+ if t.limit == 0 {
+ t.more = nil // prevent further fetches
+ }
+ }
+ return doc.GetId(), nil
+}
+
+// saveDoc converts from a struct pointer or FieldLoadSaver to protobufs.
+func saveDoc(src interface{}) ([]*pb.Field, *DocumentMetadata, error) {
+ var err error
+ var fields []Field
+ var meta *DocumentMetadata
+ switch x := src.(type) {
+ case FieldLoadSaver:
+ fields, meta, err = x.Save()
+ default:
+ fields, err = SaveStruct(src)
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+ f, err := fieldsToProto(fields)
+ return f, meta, err
+}
+
+func fieldsToProto(src []Field) ([]*pb.Field, error) {
+ // Maps to catch duplicate time or numeric fields.
+ timeFields, numericFields := make(map[string]bool), make(map[string]bool)
+ dst := make([]*pb.Field, 0, len(src))
+ for _, f := range src {
+ if !validFieldName(f.Name) {
+ return nil, fmt.Errorf("search: invalid field name %q", f.Name)
+ }
+ fieldValue := &pb.FieldValue{}
+ switch x := f.Value.(type) {
+ case string:
+ fieldValue.Type = pb.FieldValue_TEXT.Enum()
+ fieldValue.StringValue = proto.String(x)
+ case Atom:
+ fieldValue.Type = pb.FieldValue_ATOM.Enum()
+ fieldValue.StringValue = proto.String(string(x))
+ case HTML:
+ fieldValue.Type = pb.FieldValue_HTML.Enum()
+ fieldValue.StringValue = proto.String(string(x))
+ case time.Time:
+ if timeFields[f.Name] {
+ return nil, fmt.Errorf("search: duplicate time field %q", f.Name)
+ }
+ timeFields[f.Name] = true
+ fieldValue.Type = pb.FieldValue_DATE.Enum()
+ fieldValue.StringValue = proto.String(strconv.FormatInt(x.UnixNano()/1e6, 10))
+ case float64:
+ if numericFields[f.Name] {
+ return nil, fmt.Errorf("search: duplicate numeric field %q", f.Name)
+ }
+ if !validFloat(x) {
+ return nil, fmt.Errorf("search: numeric field %q with invalid value %f", f.Name, x)
+ }
+ numericFields[f.Name] = true
+ fieldValue.Type = pb.FieldValue_NUMBER.Enum()
+ fieldValue.StringValue = proto.String(strconv.FormatFloat(x, 'e', -1, 64))
+ case appengine.GeoPoint:
+ if !x.Valid() {
+ return nil, fmt.Errorf(
+ "search: GeoPoint field %q with invalid value %v",
+ f.Name, x)
+ }
+ fieldValue.Type = pb.FieldValue_GEO.Enum()
+ fieldValue.Geo = &pb.FieldValue_Geo{
+ Lat: proto.Float64(x.Lat),
+ Lng: proto.Float64(x.Lng),
+ }
+ default:
+ return nil, fmt.Errorf("search: unsupported field type: %v", reflect.TypeOf(f.Value))
+ }
+ if f.Language != "" {
+ switch f.Value.(type) {
+ case string, HTML:
+ if !validLanguage(f.Language) {
+ return nil, fmt.Errorf("search: invalid language for field %q: %q", f.Name, f.Language)
+ }
+ fieldValue.Language = proto.String(f.Language)
+ default:
+ return nil, fmt.Errorf("search: setting language not supported for field %q of type %T", f.Name, f.Value)
+ }
+ }
+ if p := fieldValue.StringValue; p != nil && !utf8.ValidString(*p) {
+ return nil, fmt.Errorf("search: %q field is invalid UTF-8: %q", f.Name, *p)
+ }
+ dst = append(dst, &pb.Field{
+ Name: proto.String(f.Name),
+ Value: fieldValue,
+ })
+ }
+ return dst, nil
+}
+
+// loadDoc converts from protobufs and document metadata to a struct pointer or
+// FieldLoadSaver. Two slices of fields may be provided:
+// src represents the document's stored fields; exprs is the derived expressions
+// requested by the developer. The latter may be empty.
+func loadDoc(dst interface{}, src, exprs []*pb.Field, meta *DocumentMetadata) (err error) {
+ fields, err := protoToFields(src)
+ if err != nil {
+ return err
+ }
+ if len(exprs) > 0 {
+ exprFields, err := protoToFields(exprs)
+ if err != nil {
+ return err
+ }
+ // Mark each field as derived.
+ for i := range exprFields {
+ exprFields[i].Derived = true
+ }
+ fields = append(fields, exprFields...)
+ }
+ switch x := dst.(type) {
+ case FieldLoadSaver:
+ return x.Load(fields, meta)
+ default:
+ return LoadStruct(dst, fields)
+ }
+}
+
+func protoToFields(fields []*pb.Field) ([]Field, error) {
+ dst := make([]Field, 0, len(fields))
+ for _, field := range fields {
+ fieldValue := field.GetValue()
+ f := Field{
+ Name: field.GetName(),
+ }
+ switch fieldValue.GetType() {
+ case pb.FieldValue_TEXT:
+ f.Value = fieldValue.GetStringValue()
+ f.Language = fieldValue.GetLanguage()
+ case pb.FieldValue_ATOM:
+ f.Value = Atom(fieldValue.GetStringValue())
+ case pb.FieldValue_HTML:
+ f.Value = HTML(fieldValue.GetStringValue())
+ f.Language = fieldValue.GetLanguage()
+ case pb.FieldValue_DATE:
+ sv := fieldValue.GetStringValue()
+ millis, err := strconv.ParseInt(sv, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("search: internal error: bad time.Time encoding %q: %v", sv, err)
+ }
+ f.Value = time.Unix(0, millis*1e6)
+ case pb.FieldValue_NUMBER:
+ sv := fieldValue.GetStringValue()
+ x, err := strconv.ParseFloat(sv, 64)
+ if err != nil {
+ return nil, err
+ }
+ f.Value = x
+ case pb.FieldValue_GEO:
+ geoValue := fieldValue.GetGeo()
+ geoPoint := appengine.GeoPoint{geoValue.GetLat(), geoValue.GetLng()}
+ if !geoPoint.Valid() {
+ return nil, fmt.Errorf("search: internal error: invalid GeoPoint encoding: %v", geoPoint)
+ }
+ f.Value = geoPoint
+ default:
+ return nil, fmt.Errorf("search: internal error: unknown data type %s", fieldValue.GetType())
+ }
+ dst = append(dst, f)
+ }
+ return dst, nil
+}
+
+func namespaceMod(m proto.Message, namespace string) {
+ set := func(s **string) {
+ if *s == nil {
+ *s = &namespace
+ }
+ }
+ switch m := m.(type) {
+ case *pb.IndexDocumentRequest:
+ set(&m.Params.IndexSpec.Namespace)
+ case *pb.ListDocumentsRequest:
+ set(&m.Params.IndexSpec.Namespace)
+ case *pb.DeleteDocumentRequest:
+ set(&m.Params.IndexSpec.Namespace)
+ case *pb.SearchRequest:
+ set(&m.Params.IndexSpec.Namespace)
+ }
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("search", pb.SearchServiceError_ErrorCode_name)
+ internal.NamespaceMods["search"] = namespaceMod
+}
diff --git a/vendor/google.golang.org/appengine/taskqueue/ns_classic.go b/vendor/google.golang.org/appengine/taskqueue/ns_classic.go
new file mode 100644
index 000000000..f8aa96e4d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/taskqueue/ns_classic.go
@@ -0,0 +1,18 @@
+// +build appengine
+
+package taskqueue
+
+import (
+ basepb "appengine_internal/base"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+func getDefaultNamespace(ctx context.Context) string {
+ c := internal.ClassicContextFromContext(ctx)
+ s := &basepb.StringProto{}
+ c.Call("__go__", "GetDefaultNamespace", &basepb.VoidProto{}, s, nil)
+ return s.GetValue()
+}
diff --git a/vendor/google.golang.org/appengine/taskqueue/ns_vm.go b/vendor/google.golang.org/appengine/taskqueue/ns_vm.go
new file mode 100644
index 000000000..af3d84130
--- /dev/null
+++ b/vendor/google.golang.org/appengine/taskqueue/ns_vm.go
@@ -0,0 +1,13 @@
+// +build !appengine
+
+package taskqueue
+
+import (
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+func getDefaultNamespace(ctx context.Context) string {
+ return internal.IncomingHeaders(ctx).Get(defaultNamespace)
+}
diff --git a/vendor/google.golang.org/appengine/taskqueue/taskqueue.go b/vendor/google.golang.org/appengine/taskqueue/taskqueue.go
new file mode 100644
index 000000000..848ea5f44
--- /dev/null
+++ b/vendor/google.golang.org/appengine/taskqueue/taskqueue.go
@@ -0,0 +1,492 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package taskqueue provides a client for App Engine's taskqueue service.
+Using this service, applications may perform work outside a user's request.
+
+A Task may be constructed manually; alternatively, since the most common
+taskqueue operation is to add a single POST task, NewPOSTTask makes it easy.
+
+ t := taskqueue.NewPOSTTask("/worker", url.Values{
+ "key": {key},
+ })
+ taskqueue.Add(c, t, "") // add t to the default queue
+*/
+package taskqueue
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ dspb "google.golang.org/appengine/internal/datastore"
+ pb "google.golang.org/appengine/internal/taskqueue"
+)
+
+var (
+ // ErrTaskAlreadyAdded is the error returned by Add and AddMulti when a task has already been added with a particular name.
+ ErrTaskAlreadyAdded = errors.New("taskqueue: task has already been added")
+)
+
+// RetryOptions let you control whether to retry a task and the backoff intervals between tries.
+type RetryOptions struct {
+ // Number of tries/leases after which the task fails permanently and is deleted.
+ // If AgeLimit is also set, both limits must be exceeded for the task to fail permanently.
+ RetryLimit int32
+
+ // Maximum time allowed since the task's first try before the task fails permanently and is deleted (only for push tasks).
+ // If RetryLimit is also set, both limits must be exceeded for the task to fail permanently.
+ AgeLimit time.Duration
+
+ // Minimum time between successive tries (only for push tasks).
+ MinBackoff time.Duration
+
+ // Maximum time between successive tries (only for push tasks).
+ MaxBackoff time.Duration
+
+ // Maximum number of times to double the interval between successive tries before the intervals increase linearly (only for push tasks).
+ MaxDoublings int32
+
+ // If MaxDoublings is zero, set ApplyZeroMaxDoublings to true to override the default non-zero value.
+ // Otherwise a zero MaxDoublings is ignored and the default is used.
+ ApplyZeroMaxDoublings bool
+}
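+
+// A sketch of a task configured to retry a bounded number of times with
+// increasing backoff; the worker path and the concrete values are
+// illustrative:
+//
+//	t := &taskqueue.Task{
+//		Path: "/worker",
+//		RetryOptions: &taskqueue.RetryOptions{
+//			RetryLimit: 5,
+//			AgeLimit:   10 * time.Minute,
+//			MinBackoff: 10 * time.Second,
+//			MaxBackoff: 5 * time.Minute,
+//		},
+//	}
+//	_, err := taskqueue.Add(c, t, "") // c is a request context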
+
+// toRetryParameters converts RetryOptions to pb.TaskQueueRetryParameters.
+func (opt *RetryOptions) toRetryParameters() *pb.TaskQueueRetryParameters {
+ params := &pb.TaskQueueRetryParameters{}
+ if opt.RetryLimit > 0 {
+ params.RetryLimit = proto.Int32(opt.RetryLimit)
+ }
+ if opt.AgeLimit > 0 {
+ params.AgeLimitSec = proto.Int64(int64(opt.AgeLimit.Seconds()))
+ }
+ if opt.MinBackoff > 0 {
+ params.MinBackoffSec = proto.Float64(opt.MinBackoff.Seconds())
+ }
+ if opt.MaxBackoff > 0 {
+ params.MaxBackoffSec = proto.Float64(opt.MaxBackoff.Seconds())
+ }
+ if opt.MaxDoublings > 0 || (opt.MaxDoublings == 0 && opt.ApplyZeroMaxDoublings) {
+ params.MaxDoublings = proto.Int32(opt.MaxDoublings)
+ }
+ return params
+}
+
+// A Task represents a task to be executed.
+type Task struct {
+ // Path is the worker URL for the task.
+ // If unset, it will default to /_ah/queue/<queue_name>.
+ Path string
+
+ // Payload is the data for the task.
+ // This will be delivered as the HTTP request body.
+ // It is only used when Method is POST, PUT or PULL.
+ // url.Values' Encode method may be used to generate this for POST requests.
+ Payload []byte
+
+ // Additional HTTP headers to pass at the task's execution time.
+ // To schedule the task to be run with an alternate app version
+ // or backend, set the "Host" header.
+ Header http.Header
+
+ // Method is the HTTP method for the task ("GET", "POST", etc.),
+ // or "PULL" if this is task is destined for a pull-based queue.
+ // If empty, this defaults to "POST".
+ Method string
+
+ // A name for the task.
+ // If empty, a name will be chosen.
+ Name string
+
+ // Delay specifies the duration the task queue service must wait
+ // before executing the task.
+ // Either Delay or ETA may be set, but not both.
+ Delay time.Duration
+
+ // ETA specifies the earliest time a task may be executed (push queues)
+ // or leased (pull queues).
+ // Either Delay or ETA may be set, but not both.
+ ETA time.Time
+
+ // The number of times the task has been dispatched or leased.
+ RetryCount int32
+
+ // Tag for the task. Only used when Method is PULL.
+ Tag string
+
+ // Retry options for this task. May be nil.
+ RetryOptions *RetryOptions
+}
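+
+// A sketch of a pull task: the payload is delivered to whichever worker
+// leases the task rather than via an HTTP request. The queue name, payload
+// and tag are assumptions for this example:
+//
+//	t := &taskqueue.Task{
+//		Method:  "PULL",
+//		Payload: []byte("job-42"),
+//		Tag:     "emails",
+//	}
+//	_, err := taskqueue.Add(c, t, "pull-queue")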
+
+func (t *Task) method() string {
+ if t.Method == "" {
+ return "POST"
+ }
+ return t.Method
+}
+
+// NewPOSTTask creates a Task that will POST to a path with the given form data.
+func NewPOSTTask(path string, params url.Values) *Task {
+ h := make(http.Header)
+ h.Set("Content-Type", "application/x-www-form-urlencoded")
+ return &Task{
+ Path: path,
+ Payload: []byte(params.Encode()),
+ Header: h,
+ Method: "POST",
+ }
+}
+
+var (
+ currentNamespace = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
+ defaultNamespace = http.CanonicalHeaderKey("X-AppEngine-Default-Namespace")
+)
+
+func newAddReq(c context.Context, task *Task, queueName string) (*pb.TaskQueueAddRequest, error) {
+ if queueName == "" {
+ queueName = "default"
+ }
+ path := task.Path
+ if path == "" {
+ path = "/_ah/queue/" + queueName
+ }
+ eta := task.ETA
+ if eta.IsZero() {
+ eta = time.Now().Add(task.Delay)
+ } else if task.Delay != 0 {
+ panic("taskqueue: both Delay and ETA are set")
+ }
+ req := &pb.TaskQueueAddRequest{
+ QueueName: []byte(queueName),
+ TaskName: []byte(task.Name),
+ EtaUsec: proto.Int64(eta.UnixNano() / 1e3),
+ }
+ method := task.method()
+ if method == "PULL" {
+ // Pull-based task
+ req.Body = task.Payload
+ req.Mode = pb.TaskQueueMode_PULL.Enum()
+ if task.Tag != "" {
+ req.Tag = []byte(task.Tag)
+ }
+ } else {
+ // HTTP-based task
+ if v, ok := pb.TaskQueueAddRequest_RequestMethod_value[method]; ok {
+ req.Method = pb.TaskQueueAddRequest_RequestMethod(v).Enum()
+ } else {
+ return nil, fmt.Errorf("taskqueue: bad method %q", method)
+ }
+ req.Url = []byte(path)
+ for k, vs := range task.Header {
+ for _, v := range vs {
+ req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{
+ Key: []byte(k),
+ Value: []byte(v),
+ })
+ }
+ }
+ if method == "POST" || method == "PUT" {
+ req.Body = task.Payload
+ }
+
+ // Namespace headers.
+ if _, ok := task.Header[currentNamespace]; !ok {
+ // Fetch the current namespace of this request.
+ ns := internal.NamespaceFromContext(c)
+ req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{
+ Key: []byte(currentNamespace),
+ Value: []byte(ns),
+ })
+ }
+ if _, ok := task.Header[defaultNamespace]; !ok {
+ // Fetch the X-AppEngine-Default-Namespace header of this request.
+ if ns := getDefaultNamespace(c); ns != "" {
+ req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{
+ Key: []byte(defaultNamespace),
+ Value: []byte(ns),
+ })
+ }
+ }
+ }
+
+ if task.RetryOptions != nil {
+ req.RetryParameters = task.RetryOptions.toRetryParameters()
+ }
+
+ return req, nil
+}
+
+var alreadyAddedErrors = map[pb.TaskQueueServiceError_ErrorCode]bool{
+ pb.TaskQueueServiceError_TASK_ALREADY_EXISTS: true,
+ pb.TaskQueueServiceError_TOMBSTONED_TASK: true,
+}
+
+// Add adds the task to a named queue.
+// An empty queue name means that the default queue will be used.
+// Add returns an equivalent Task with defaults filled in, including setting
+// the task's Name field to the chosen name if the original was empty.
+func Add(c context.Context, task *Task, queueName string) (*Task, error) {
+ req, err := newAddReq(c, task, queueName)
+ if err != nil {
+ return nil, err
+ }
+ res := &pb.TaskQueueAddResponse{}
+ if err := internal.Call(c, "taskqueue", "Add", req, res); err != nil {
+ apiErr, ok := err.(*internal.APIError)
+ if ok && alreadyAddedErrors[pb.TaskQueueServiceError_ErrorCode(apiErr.Code)] {
+ return nil, ErrTaskAlreadyAdded
+ }
+ return nil, err
+ }
+ resultTask := *task
+ resultTask.Method = task.method()
+ if task.Name == "" {
+ resultTask.Name = string(res.ChosenTaskName)
+ }
+ return &resultTask, nil
+}
+
+// AddMulti adds multiple tasks to a named queue.
+// An empty queue name means that the default queue will be used.
+// AddMulti returns a slice of equivalent tasks with defaults filled in, including setting
+// each task's Name field to the chosen name if the original was empty.
+// If a given task is badly formed or could not be added, an appengine.MultiError is returned.
+func AddMulti(c context.Context, tasks []*Task, queueName string) ([]*Task, error) {
+ req := &pb.TaskQueueBulkAddRequest{
+ AddRequest: make([]*pb.TaskQueueAddRequest, len(tasks)),
+ }
+ me, any := make(appengine.MultiError, len(tasks)), false
+ for i, t := range tasks {
+ req.AddRequest[i], me[i] = newAddReq(c, t, queueName)
+ any = any || me[i] != nil
+ }
+ if any {
+ return nil, me
+ }
+ res := &pb.TaskQueueBulkAddResponse{}
+ if err := internal.Call(c, "taskqueue", "BulkAdd", req, res); err != nil {
+ return nil, err
+ }
+ if len(res.Taskresult) != len(tasks) {
+ return nil, errors.New("taskqueue: server error")
+ }
+ tasksOut := make([]*Task, len(tasks))
+ for i, tr := range res.Taskresult {
+ tasksOut[i] = new(Task)
+ *tasksOut[i] = *tasks[i]
+ tasksOut[i].Method = tasksOut[i].method()
+ if tasksOut[i].Name == "" {
+ tasksOut[i].Name = string(tr.ChosenTaskName)
+ }
+ if *tr.Result != pb.TaskQueueServiceError_OK {
+ if alreadyAddedErrors[*tr.Result] {
+ me[i] = ErrTaskAlreadyAdded
+ } else {
+ me[i] = &internal.APIError{
+ Service: "taskqueue",
+ Code: int32(*tr.Result),
+ }
+ }
+ any = true
+ }
+ }
+ if any {
+ return tasksOut, me
+ }
+ return tasksOut, nil
+}
+
+// Delete deletes a task from a named queue.
+func Delete(c context.Context, task *Task, queueName string) error {
+ err := DeleteMulti(c, []*Task{task}, queueName)
+ if me, ok := err.(appengine.MultiError); ok {
+ return me[0]
+ }
+ return err
+}
+
+// DeleteMulti deletes multiple tasks from a named queue.
+// If a given task could not be deleted, an appengine.MultiError is returned.
+func DeleteMulti(c context.Context, tasks []*Task, queueName string) error {
+ taskNames := make([][]byte, len(tasks))
+ for i, t := range tasks {
+ taskNames[i] = []byte(t.Name)
+ }
+ if queueName == "" {
+ queueName = "default"
+ }
+ req := &pb.TaskQueueDeleteRequest{
+ QueueName: []byte(queueName),
+ TaskName: taskNames,
+ }
+ res := &pb.TaskQueueDeleteResponse{}
+ if err := internal.Call(c, "taskqueue", "Delete", req, res); err != nil {
+ return err
+ }
+ if a, b := len(req.TaskName), len(res.Result); a != b {
+ return fmt.Errorf("taskqueue: internal error: requested deletion of %d tasks, got %d results", a, b)
+ }
+ me, any := make(appengine.MultiError, len(res.Result)), false
+ for i, ec := range res.Result {
+ if ec != pb.TaskQueueServiceError_OK {
+ me[i] = &internal.APIError{
+ Service: "taskqueue",
+ Code: int32(ec),
+ }
+ any = true
+ }
+ }
+ if any {
+ return me
+ }
+ return nil
+}
+
+func lease(c context.Context, maxTasks int, queueName string, leaseTime int, groupByTag bool, tag []byte) ([]*Task, error) {
+ if queueName == "" {
+ queueName = "default"
+ }
+ req := &pb.TaskQueueQueryAndOwnTasksRequest{
+ QueueName: []byte(queueName),
+ LeaseSeconds: proto.Float64(float64(leaseTime)),
+ MaxTasks: proto.Int64(int64(maxTasks)),
+ GroupByTag: proto.Bool(groupByTag),
+ Tag: tag,
+ }
+ res := &pb.TaskQueueQueryAndOwnTasksResponse{}
+ if err := internal.Call(c, "taskqueue", "QueryAndOwnTasks", req, res); err != nil {
+ return nil, err
+ }
+ tasks := make([]*Task, len(res.Task))
+ for i, t := range res.Task {
+ tasks[i] = &Task{
+ Payload: t.Body,
+ Name: string(t.TaskName),
+ Method: "PULL",
+ ETA: time.Unix(0, *t.EtaUsec*1e3),
+ RetryCount: *t.RetryCount,
+ Tag: string(t.Tag),
+ }
+ }
+ return tasks, nil
+}
+
+// Lease leases tasks from a queue.
+// leaseTime is in seconds.
+// The number of tasks fetched will be at most maxTasks.
+func Lease(c context.Context, maxTasks int, queueName string, leaseTime int) ([]*Task, error) {
+ return lease(c, maxTasks, queueName, leaseTime, false, nil)
+}
+
+// LeaseByTag leases tasks from a queue, grouped by tag.
+// If tag is empty, then the returned tasks are grouped by the tag of the task with earliest ETA.
+// leaseTime is in seconds.
+// The number of tasks fetched will be at most maxTasks.
+func LeaseByTag(c context.Context, maxTasks int, queueName string, leaseTime int, tag string) ([]*Task, error) {
+ return lease(c, maxTasks, queueName, leaseTime, true, []byte(tag))
+}
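+
+// A usage sketch for leasing, processing and deleting pull tasks; the queue
+// name, the 60 second lease and the process helper are assumptions for this
+// example:
+//
+//	tasks, err := taskqueue.Lease(c, 100, "pull-queue", 60)
+//	if err != nil {
+//		return err
+//	}
+//	for _, t := range tasks {
+//		process(t.Payload) // application-specific work
+//		if err := taskqueue.Delete(c, t, "pull-queue"); err != nil {
+//			return err
+//		}
+//	}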
+
+// Purge removes all tasks from a queue.
+func Purge(c context.Context, queueName string) error {
+ if queueName == "" {
+ queueName = "default"
+ }
+ req := &pb.TaskQueuePurgeQueueRequest{
+ QueueName: []byte(queueName),
+ }
+ res := &pb.TaskQueuePurgeQueueResponse{}
+ return internal.Call(c, "taskqueue", "PurgeQueue", req, res)
+}
+
+// ModifyLease modifies the lease of a task.
+// Used to request more processing time, or to abandon processing.
+// leaseTime is in seconds and must not be negative.
+func ModifyLease(c context.Context, task *Task, queueName string, leaseTime int) error {
+ if queueName == "" {
+ queueName = "default"
+ }
+ req := &pb.TaskQueueModifyTaskLeaseRequest{
+ QueueName: []byte(queueName),
+ TaskName: []byte(task.Name),
+ EtaUsec: proto.Int64(task.ETA.UnixNano() / 1e3), // Used to verify ownership.
+ LeaseSeconds: proto.Float64(float64(leaseTime)),
+ }
+ res := &pb.TaskQueueModifyTaskLeaseResponse{}
+ if err := internal.Call(c, "taskqueue", "ModifyTaskLease", req, res); err != nil {
+ return err
+ }
+ task.ETA = time.Unix(0, *res.UpdatedEtaUsec*1e3)
+ return nil
+}
+
+// QueueStatistics represents statistics about a single task queue.
+type QueueStatistics struct {
+ Tasks int // may be an approximation
+ OldestETA time.Time // zero if there are no pending tasks
+
+ Executed1Minute int // tasks executed in the last minute
+ InFlight int // tasks executing now
+ EnforcedRate float64 // requests per second
+}
+
+// QueueStats retrieves statistics about queues.
+func QueueStats(c context.Context, queueNames []string) ([]QueueStatistics, error) {
+ req := &pb.TaskQueueFetchQueueStatsRequest{
+ QueueName: make([][]byte, len(queueNames)),
+ }
+ for i, q := range queueNames {
+ if q == "" {
+ q = "default"
+ }
+ req.QueueName[i] = []byte(q)
+ }
+ res := &pb.TaskQueueFetchQueueStatsResponse{}
+ if err := internal.Call(c, "taskqueue", "FetchQueueStats", req, res); err != nil {
+ return nil, err
+ }
+ qs := make([]QueueStatistics, len(res.Queuestats))
+ for i, qsg := range res.Queuestats {
+ qs[i] = QueueStatistics{
+ Tasks: int(*qsg.NumTasks),
+ }
+ if eta := *qsg.OldestEtaUsec; eta > -1 {
+ qs[i].OldestETA = time.Unix(0, eta*1e3)
+ }
+ if si := qsg.ScannerInfo; si != nil {
+ qs[i].Executed1Minute = int(*si.ExecutedLastMinute)
+ qs[i].InFlight = int(si.GetRequestsInFlight())
+ qs[i].EnforcedRate = si.GetEnforcedRate()
+ }
+ }
+ return qs, nil
+}
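+
+// For example (a sketch; an empty name selects the default queue):
+//
+//	stats, err := taskqueue.QueueStats(c, []string{""})
+//	if err != nil {
+//		return err
+//	}
+//	log.Printf("%d tasks pending, oldest ETA %v", stats[0].Tasks, stats[0].OldestETA)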
+
+func setTransaction(x *pb.TaskQueueAddRequest, t *dspb.Transaction) {
+ x.Transaction = t
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("taskqueue", pb.TaskQueueServiceError_ErrorCode_name)
+
+ // Datastore error codes are shifted by DATASTORE_ERROR when presented through taskqueue.
+ dsCode := int32(pb.TaskQueueServiceError_DATASTORE_ERROR) + int32(dspb.Error_TIMEOUT)
+ internal.RegisterTimeoutErrorCode("taskqueue", dsCode)
+
+ // Transaction registration.
+ internal.RegisterTransactionSetter(setTransaction)
+ internal.RegisterTransactionSetter(func(x *pb.TaskQueueBulkAddRequest, t *dspb.Transaction) {
+ for _, req := range x.AddRequest {
+ setTransaction(req, t)
+ }
+ })
+}
diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go
new file mode 100644
index 000000000..05642a992
--- /dev/null
+++ b/vendor/google.golang.org/appengine/timeout.go
@@ -0,0 +1,20 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import "golang.org/x/net/context"
+
+// IsTimeoutError reports whether err is a timeout error.
+func IsTimeoutError(err error) bool {
+ if err == context.DeadlineExceeded {
+ return true
+ }
+ if t, ok := err.(interface {
+ IsTimeout() bool
+ }); ok {
+ return t.IsTimeout()
+ }
+ return false
+}
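+
+// For example (a sketch; callService stands in for any App Engine API call):
+//
+//	if err := callService(ctx); err != nil {
+//		if appengine.IsTimeoutError(err) {
+//			// The deadline passed; consider retrying with backoff.
+//		}
+//		return err
+//	}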
diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
new file mode 100644
index 000000000..ba3d17c71
--- /dev/null
+++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
@@ -0,0 +1,210 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package urlfetch provides an http.RoundTripper implementation
+// for fetching URLs via App Engine's urlfetch service.
+package urlfetch
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/urlfetch"
+)
+
+// Transport is an implementation of http.RoundTripper for
+// App Engine. Users should generally create an http.Client using
+// this transport and use the Client rather than using this transport
+// directly.
+type Transport struct {
+ Context context.Context
+
+ // Controls whether the application checks the validity of SSL certificates
+ // over HTTPS connections. A value of false (the default) instructs the
+ // application to send a request to the server only if the certificate is
+ // valid and signed by a trusted certificate authority (CA), and also
+ // includes a hostname that matches the certificate. A value of true
+ // instructs the application to perform no certificate validation.
+ AllowInvalidServerCertificate bool
+}
+
+// Verify statically that *Transport implements http.RoundTripper.
+var _ http.RoundTripper = (*Transport)(nil)
+
+// Client returns an *http.Client using a default urlfetch Transport. This
+// client will have the default deadline of 5 seconds, and will check the
+// validity of SSL certificates.
+//
+// Any deadline of the provided context will be used for requests through this client;
+// if the context does not have a deadline then a 5 second default is used.
+func Client(ctx context.Context) *http.Client {
+ return &http.Client{
+ Transport: &Transport{
+ Context: ctx,
+ },
+ }
+}
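+
+// A usage sketch: build a context from the incoming request and issue an
+// outbound request through the urlfetch transport. The handler and target
+// URL are assumptions for this example:
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		ctx := appengine.NewContext(r)
+//		client := urlfetch.Client(ctx)
+//		resp, err := client.Get("https://www.example.com/")
+//		if err != nil {
+//			http.Error(w, err.Error(), http.StatusInternalServerError)
+//			return
+//		}
+//		defer resp.Body.Close()
+//		fmt.Fprintf(w, "fetched with status %s", resp.Status)
+//	}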
+
+type bodyReader struct {
+ content []byte
+ truncated bool
+ closed bool
+}
+
+// ErrTruncatedBody is the error returned after the final Read() from a
+// response's Body if the body has been truncated by App Engine's proxy.
+var ErrTruncatedBody = errors.New("urlfetch: truncated body")
+
+func statusCodeToText(code int) string {
+ if t := http.StatusText(code); t != "" {
+ return t
+ }
+ return strconv.Itoa(code)
+}
+
+func (br *bodyReader) Read(p []byte) (n int, err error) {
+ if br.closed {
+ if br.truncated {
+ return 0, ErrTruncatedBody
+ }
+ return 0, io.EOF
+ }
+ n = copy(p, br.content)
+ if n > 0 {
+ br.content = br.content[n:]
+ return
+ }
+ if br.truncated {
+ br.closed = true
+ return 0, ErrTruncatedBody
+ }
+ return 0, io.EOF
+}
+
+func (br *bodyReader) Close() error {
+ br.closed = true
+ br.content = nil
+ return nil
+}
+
+// A map of the URL Fetch-accepted methods that take a request body.
+var methodAcceptsRequestBody = map[string]bool{
+ "POST": true,
+ "PUT": true,
+ "PATCH": true,
+}
+
+// urlString returns a valid string given a URL. This function is necessary because
+// the String method of URL doesn't correctly handle URLs with non-empty Opaque values.
+// See http://code.google.com/p/go/issues/detail?id=4860.
+func urlString(u *url.URL) string {
+ if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") {
+ return u.String()
+ }
+ aux := *u
+ aux.Opaque = "//" + aux.Host + aux.Opaque
+ return aux.String()
+}
+
+// RoundTrip issues a single HTTP request and returns its response. Per the
+// http.RoundTripper interface, RoundTrip only returns an error if the request
+// is unsupported or the URL Fetch proxy fails.
+// Note that HTTP response codes such as 5xx, 403, 404, etc. are not
+// errors as far as the transport is concerned and will be returned
+// with err set to nil.
+func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) {
+ methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method]
+ if !ok {
+ return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method)
+ }
+
+ method := pb.URLFetchRequest_RequestMethod(methNum)
+
+ freq := &pb.URLFetchRequest{
+ Method: &method,
+ Url: proto.String(urlString(req.URL)),
+ FollowRedirects: proto.Bool(false), // http.Client's responsibility
+ MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate),
+ }
+ if deadline, ok := t.Context.Deadline(); ok {
+ freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds())
+ }
+
+ for k, vals := range req.Header {
+ for _, val := range vals {
+ freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{
+ Key: proto.String(k),
+ Value: proto.String(val),
+ })
+ }
+ }
+ if methodAcceptsRequestBody[req.Method] && req.Body != nil {
+ // Avoid a []byte copy if req.Body has a Bytes method.
+ switch b := req.Body.(type) {
+ case interface {
+ Bytes() []byte
+ }:
+ freq.Payload = b.Bytes()
+ default:
+ freq.Payload, err = ioutil.ReadAll(req.Body)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ fres := &pb.URLFetchResponse{}
+ if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil {
+ return nil, err
+ }
+
+ res = &http.Response{}
+ res.StatusCode = int(*fres.StatusCode)
+ res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode))
+ res.Header = make(http.Header)
+ res.Request = req
+
+ // Faked:
+ res.ProtoMajor = 1
+ res.ProtoMinor = 1
+ res.Proto = "HTTP/1.1"
+ res.Close = true
+
+ for _, h := range fres.Header {
+ hkey := http.CanonicalHeaderKey(*h.Key)
+ hval := *h.Value
+ if hkey == "Content-Length" {
+ // Will get filled in below for all but HEAD requests.
+ if req.Method == "HEAD" {
+ res.ContentLength, _ = strconv.ParseInt(hval, 10, 64)
+ }
+ continue
+ }
+ res.Header.Add(hkey, hval)
+ }
+
+ if req.Method != "HEAD" {
+ res.ContentLength = int64(len(fres.Content))
+ }
+
+ truncated := fres.GetContentWasTruncated()
+ res.Body = &bodyReader{content: fres.Content, truncated: truncated}
+ return
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name)
+ internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED))
+}
diff --git a/vendor/google.golang.org/appengine/user/oauth.go b/vendor/google.golang.org/appengine/user/oauth.go
new file mode 100644
index 000000000..727a080d5
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/oauth.go
@@ -0,0 +1,48 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package user
+
+import (
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/user"
+)
+
+// CurrentOAuth returns the user associated with the OAuth consumer making this
+// request. If the OAuth consumer did not make a valid OAuth request, or the
+// scope is non-empty and the current user does not have this scope, this method
+// will return an error.
+func CurrentOAuth(c context.Context, scope string) (*User, error) {
+ req := &pb.GetOAuthUserRequest{}
+ if scope != "" {
+ req.Scope = &scope
+ }
+ res := &pb.GetOAuthUserResponse{}
+
+ err := internal.Call(c, "user", "GetOAuthUser", req, res)
+ if err != nil {
+ return nil, err
+ }
+ return &User{
+ Email: *res.Email,
+ AuthDomain: *res.AuthDomain,
+ Admin: res.GetIsAdmin(),
+ ID: *res.UserId,
+ }, nil
+}
+
+// OAuthConsumerKey returns the OAuth consumer key provided with the current
+// request. This method will return an error if the OAuth request was invalid.
+func OAuthConsumerKey(c context.Context) (string, error) {
+ req := &pb.CheckOAuthSignatureRequest{}
+ res := &pb.CheckOAuthSignatureResponse{}
+
+ err := internal.Call(c, "user", "CheckOAuthSignature", req, res)
+ if err != nil {
+ return "", err
+ }
+ return *res.OauthConsumerKey, err
+}
diff --git a/vendor/google.golang.org/appengine/user/user.go b/vendor/google.golang.org/appengine/user/user.go
new file mode 100644
index 000000000..619f53572
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/user.go
@@ -0,0 +1,80 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package user provides a client for App Engine's user authentication service.
+package user
+
+import (
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/user"
+)
+
+// User represents a user of the application.
+type User struct {
+ Email string
+ AuthDomain string
+ Admin bool
+
+ // ID is the unique permanent ID of the user.
+ // It is populated if the Email is associated
+ // with a Google account, or empty otherwise.
+ ID string
+
+ FederatedIdentity string
+ FederatedProvider string
+}
+
+// String returns a displayable name for the user.
+func (u *User) String() string {
+ if u.AuthDomain != "" && strings.HasSuffix(u.Email, "@"+u.AuthDomain) {
+ return u.Email[:len(u.Email)-len("@"+u.AuthDomain)]
+ }
+ if u.FederatedIdentity != "" {
+ return u.FederatedIdentity
+ }
+ return u.Email
+}
+
+// LoginURL returns a URL that, when visited, prompts the user to sign in,
+// then redirects the user to the URL specified by dest.
+func LoginURL(c context.Context, dest string) (string, error) {
+ return LoginURLFederated(c, dest, "")
+}
+
+// LoginURLFederated is like LoginURL but accepts a user's OpenID identifier.
+func LoginURLFederated(c context.Context, dest, identity string) (string, error) {
+ req := &pb.CreateLoginURLRequest{
+ DestinationUrl: proto.String(dest),
+ }
+ if identity != "" {
+ req.FederatedIdentity = proto.String(identity)
+ }
+ res := &pb.CreateLoginURLResponse{}
+ if err := internal.Call(c, "user", "CreateLoginURL", req, res); err != nil {
+ return "", err
+ }
+ return *res.LoginUrl, nil
+}
+
+// LogoutURL returns a URL that, when visited, signs the user out,
+// then redirects the user to the URL specified by dest.
+func LogoutURL(c context.Context, dest string) (string, error) {
+ req := &pb.CreateLogoutURLRequest{
+ DestinationUrl: proto.String(dest),
+ }
+ res := &pb.CreateLogoutURLResponse{}
+ if err := internal.Call(c, "user", "CreateLogoutURL", req, res); err != nil {
+ return "", err
+ }
+ return *res.LogoutUrl, nil
+}
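+
+// A usage sketch that offers a sign-in or sign-out link depending on the
+// current user; the handler and the "/" destination are assumptions for this
+// example:
+//
+//	func welcome(w http.ResponseWriter, r *http.Request) {
+//		c := appengine.NewContext(r)
+//		u := user.Current(c)
+//		if u == nil {
+//			url, _ := user.LoginURL(c, "/")
+//			fmt.Fprintf(w, `<a href="%s">Sign in</a>`, url)
+//			return
+//		}
+//		url, _ := user.LogoutURL(c, "/")
+//		fmt.Fprintf(w, `Welcome, %s! <a href="%s">Sign out</a>`, u, url)
+//	}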
+
+func init() {
+ internal.RegisterErrorCodeMap("user", pb.UserServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/user/user_classic.go b/vendor/google.golang.org/appengine/user/user_classic.go
new file mode 100644
index 000000000..9f37a08cf
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/user_classic.go
@@ -0,0 +1,31 @@
+// +build appengine
+
+package user
+
+import (
+ "appengine/user"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+func Current(ctx context.Context) *User {
+ u := user.Current(internal.ClassicContextFromContext(ctx))
+ if u == nil {
+ return nil
+ }
+ // Map appengine/user.User to this package's User type.
+ return &User{
+ Email: u.Email,
+ AuthDomain: u.AuthDomain,
+ Admin: u.Admin,
+ ID: u.ID,
+ FederatedIdentity: u.FederatedIdentity,
+ FederatedProvider: u.FederatedProvider,
+ }
+}
+
+func IsAdmin(ctx context.Context) bool {
+ return user.IsAdmin(internal.ClassicContextFromContext(ctx))
+}
diff --git a/vendor/google.golang.org/appengine/user/user_vm.go b/vendor/google.golang.org/appengine/user/user_vm.go
new file mode 100644
index 000000000..38dc6324a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/user_vm.go
@@ -0,0 +1,34 @@
+// +build !appengine
+
+package user
+
+import (
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// Current returns the currently logged-in user,
+// or nil if the user is not signed in.
+func Current(c context.Context) *User {
+ h := internal.IncomingHeaders(c)
+ u := &User{
+ Email: h.Get("X-AppEngine-User-Email"),
+ AuthDomain: h.Get("X-AppEngine-Auth-Domain"),
+ ID: h.Get("X-AppEngine-User-Id"),
+ Admin: h.Get("X-AppEngine-User-Is-Admin") == "1",
+ FederatedIdentity: h.Get("X-AppEngine-Federated-Identity"),
+ FederatedProvider: h.Get("X-AppEngine-Federated-Provider"),
+ }
+ if u.Email == "" && u.FederatedIdentity == "" {
+ return nil
+ }
+ return u
+}
+
+// IsAdmin returns true if the current user is signed in and
+// is currently registered as an administrator of the application.
+func IsAdmin(c context.Context) bool {
+ h := internal.IncomingHeaders(c)
+ return h.Get("X-AppEngine-User-Is-Admin") == "1"
+}
diff --git a/vendor/google.golang.org/appengine/xmpp/xmpp.go b/vendor/google.golang.org/appengine/xmpp/xmpp.go
new file mode 100644
index 000000000..bbc27538a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/xmpp/xmpp.go
@@ -0,0 +1,253 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package xmpp provides the means to send and receive instant messages
+to and from users of XMPP-compatible services.
+
+To send a message,
+ m := &xmpp.Message{
+ To: []string{"kaylee@example.com"},
+ Body: `Hi! How's the carrot?`,
+ }
+ err := m.Send(c)
+
+To receive messages,
+ func init() {
+ xmpp.Handle(handleChat)
+ }
+
+ func handleChat(c context.Context, m *xmpp.Message) {
+ // ...
+ }
+*/
+package xmpp
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/xmpp"
+)
+
+// Message represents an incoming chat message.
+type Message struct {
+ // Sender is the JID of the sender.
+ // Optional for outgoing messages.
+ Sender string
+
+ // To is the intended recipients of the message.
+ // Incoming messages will have exactly one element.
+ To []string
+
+ // Body is the body of the message.
+ Body string
+
+ // Type is the message type, per RFC 3921.
+ // It defaults to "chat".
+ Type string
+
+ // RawXML is whether the body contains raw XML.
+ RawXML bool
+}
+
+// Presence represents an outgoing presence update.
+type Presence struct {
+ // Sender is the JID (optional).
+ Sender string
+
+	// To is the intended recipient of the presence update.
+ To string
+
+ // Type, per RFC 3921 (optional). Defaults to "available".
+ Type string
+
+ // State of presence (optional).
+ // Valid values: "away", "chat", "xa", "dnd" (RFC 3921).
+ State string
+
+ // Free text status message (optional).
+ Status string
+}
+
+var (
+ ErrPresenceUnavailable = errors.New("xmpp: presence unavailable")
+ ErrInvalidJID = errors.New("xmpp: invalid JID")
+)
+
+// Handle arranges for f to be called for incoming XMPP messages.
+// Only messages of type "chat" or "normal" will be handled.
+func Handle(f func(c context.Context, m *Message)) {
+ http.HandleFunc("/_ah/xmpp/message/chat/", func(_ http.ResponseWriter, r *http.Request) {
+ f(appengine.NewContext(r), &Message{
+ Sender: r.FormValue("from"),
+ To: []string{r.FormValue("to")},
+ Body: r.FormValue("body"),
+ })
+ })
+}
+
+// Send sends a message.
+// If any failures occur with specific recipients, the error will be an appengine.MultiError.
+func (m *Message) Send(c context.Context) error {
+ req := &pb.XmppMessageRequest{
+ Jid: m.To,
+ Body: &m.Body,
+ RawXml: &m.RawXML,
+ }
+ if m.Type != "" && m.Type != "chat" {
+ req.Type = &m.Type
+ }
+ if m.Sender != "" {
+ req.FromJid = &m.Sender
+ }
+ res := &pb.XmppMessageResponse{}
+ if err := internal.Call(c, "xmpp", "SendMessage", req, res); err != nil {
+ return err
+ }
+
+ if len(res.Status) != len(req.Jid) {
+ return fmt.Errorf("xmpp: sent message to %d JIDs, but only got %d statuses back", len(req.Jid), len(res.Status))
+ }
+ me, any := make(appengine.MultiError, len(req.Jid)), false
+ for i, st := range res.Status {
+ if st != pb.XmppMessageResponse_NO_ERROR {
+ me[i] = errors.New(st.String())
+ any = true
+ }
+ }
+ if any {
+ return me
+ }
+ return nil
+}
+
+// Invite sends an invitation. If the from address is an empty string,
+// the default (yourapp@appspot.com/bot) will be used.
+func Invite(c context.Context, to, from string) error {
+ req := &pb.XmppInviteRequest{
+ Jid: &to,
+ }
+ if from != "" {
+ req.FromJid = &from
+ }
+ res := &pb.XmppInviteResponse{}
+ return internal.Call(c, "xmpp", "SendInvite", req, res)
+}
+
+// Send sends a presence update.
+func (p *Presence) Send(c context.Context) error {
+ req := &pb.XmppSendPresenceRequest{
+ Jid: &p.To,
+ }
+ if p.State != "" {
+ req.Show = &p.State
+ }
+ if p.Type != "" {
+ req.Type = &p.Type
+ }
+ if p.Sender != "" {
+ req.FromJid = &p.Sender
+ }
+ if p.Status != "" {
+ req.Status = &p.Status
+ }
+ res := &pb.XmppSendPresenceResponse{}
+ return internal.Call(c, "xmpp", "SendPresence", req, res)
+}
+
+var presenceMap = map[pb.PresenceResponse_SHOW]string{
+ pb.PresenceResponse_NORMAL: "",
+ pb.PresenceResponse_AWAY: "away",
+ pb.PresenceResponse_DO_NOT_DISTURB: "dnd",
+ pb.PresenceResponse_CHAT: "chat",
+ pb.PresenceResponse_EXTENDED_AWAY: "xa",
+}
+
+// GetPresence retrieves a user's presence.
+// If the from address is an empty string, the default
+// (yourapp@appspot.com/bot) will be used.
+// Possible return values are "", "away", "dnd", "chat", "xa".
+// ErrPresenceUnavailable is returned if the presence is unavailable.
+func GetPresence(c context.Context, to string, from string) (string, error) {
+ req := &pb.PresenceRequest{
+ Jid: &to,
+ }
+ if from != "" {
+ req.FromJid = &from
+ }
+ res := &pb.PresenceResponse{}
+ if err := internal.Call(c, "xmpp", "GetPresence", req, res); err != nil {
+ return "", err
+ }
+ if !*res.IsAvailable || res.Presence == nil {
+ return "", ErrPresenceUnavailable
+ }
+ presence, ok := presenceMap[*res.Presence]
+ if ok {
+ return presence, nil
+ }
+ return "", fmt.Errorf("xmpp: unknown presence %v", *res.Presence)
+}
+
+// GetPresenceMulti retrieves multiple users' presence.
+// If the from address is an empty string, the default
+// (yourapp@appspot.com/bot) will be used.
+// Possible return values are "", "away", "dnd", "chat", "xa".
+// If any presence is unavailable, an appengine.MultiError is returned.
+func GetPresenceMulti(c context.Context, to []string, from string) ([]string, error) {
+ req := &pb.BulkPresenceRequest{
+ Jid: to,
+ }
+ if from != "" {
+ req.FromJid = &from
+ }
+ res := &pb.BulkPresenceResponse{}
+
+ if err := internal.Call(c, "xmpp", "BulkGetPresence", req, res); err != nil {
+ return nil, err
+ }
+
+ presences := make([]string, 0, len(res.PresenceResponse))
+ errs := appengine.MultiError{}
+
+ addResult := func(presence string, err error) {
+ presences = append(presences, presence)
+ errs = append(errs, err)
+ }
+
+ anyErr := false
+ for _, subres := range res.PresenceResponse {
+ if !subres.GetValid() {
+ anyErr = true
+ addResult("", ErrInvalidJID)
+ continue
+ }
+ if !*subres.IsAvailable || subres.Presence == nil {
+ anyErr = true
+ addResult("", ErrPresenceUnavailable)
+ continue
+ }
+ presence, ok := presenceMap[*subres.Presence]
+ if ok {
+ addResult(presence, nil)
+ } else {
+ anyErr = true
+ addResult("", fmt.Errorf("xmpp: unknown presence %q", *subres.Presence))
+ }
+ }
+ if anyErr {
+ return presences, errs
+ }
+ return presences, nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("xmpp", pb.XmppServiceError_ErrorCode_name)
+}
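Message.Send and GetPresenceMulti both report per-recipient failures as an appengine.MultiError with one slot per JID, in request order. A caller-side sketch of that contract, with hypothetical helper names (notify, onlineJIDs) and assuming one response entry per requested JID:

    package app // hypothetical package, for illustration only

    import (
        "log"

        "golang.org/x/net/context"

        "google.golang.org/appengine"
        "google.golang.org/appengine/xmpp"
    )

    // notify sends one message to several JIDs and logs which recipients
    // failed, following Message.Send's MultiError contract shown above.
    func notify(ctx context.Context, jids []string, body string) {
        err := (&xmpp.Message{To: jids, Body: body}).Send(ctx)
        if me, ok := err.(appengine.MultiError); ok {
            for i, e := range me { // one slot per JID; nil entries succeeded
                if e != nil {
                    log.Printf("xmpp: send to %s failed: %v", jids[i], e)
                }
            }
            return
        }
        if err != nil {
            log.Printf("xmpp: send failed: %v", err)
        }
    }

    // onlineJIDs keeps only the recipients whose presence is currently
    // available; an empty show string maps to PresenceResponse_NORMAL.
    func onlineJIDs(ctx context.Context, jids []string) []string {
        shows, err := xmpp.GetPresenceMulti(ctx, jids, "")
        me, partial := err.(appengine.MultiError)
        if err != nil && !partial {
            return nil // RPC-level failure, no per-JID detail
        }
        var online []string
        for i, show := range shows {
            if partial && me[i] != nil {
                continue // e.g. ErrPresenceUnavailable or ErrInvalidJID
            }
            if show == "" {
                online = append(online, jids[i])
            }
        }
        return online
    }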