govendor fetch github.com/hashicorp/go-plugin/...

This puts us on a version that has grpc protocol support. Although we're
not actually using that yet, the plugin handshake has changed slightly to
allow plugins to declare whether they use the old or new protocol, and
so this upgrade allows us to support plugins that were built against
newer versions of go-plugin that include this extra field in the
handshake.

This fixes #15756.
Martin Atkins 2017-08-11 10:19:46 -07:00
parent d756e59824
commit ee5fc3b986
114 changed files with 11994 additions and 2769 deletions
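For context, a minimal sketch of a plugin entry point built on go-plugin's public API (plugin.HandshakeConfig, plugin.ServeConfig, plugin.Serve). The magic-cookie values and the plugin map entry are hypothetical, and the old-vs-new protocol declaration mentioned above is written into the wire handshake by go-plugin itself rather than set by plugin code:

```go
package main

import "github.com/hashicorp/go-plugin"

// Shared between host and plugin. The netrpc-vs-grpc declaration discussed
// in the commit message is emitted by go-plugin during the handshake; it is
// not a field of this struct.
var handshake = plugin.HandshakeConfig{
	ProtocolVersion:  1,
	MagicCookieKey:   "EXAMPLE_PLUGIN_COOKIE", // hypothetical value
	MagicCookieValue: "example",               // hypothetical value
}

func main() {
	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: handshake,
		Plugins: map[string]plugin.Plugin{
			// "example": &ExamplePlugin{}, // hypothetical plugin implementation
		},
	})
}
```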

139
vendor/github.com/golang/protobuf/ptypes/any.go generated vendored Normal file

@@ -0,0 +1,139 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
// This file implements functions to marshal proto.Message to/from
// google.protobuf.Any message.
import (
"fmt"
"reflect"
"strings"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/any"
)
const googleApis = "type.googleapis.com/"
// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
//
// Note that regular type assertions should be done using the Is
// function. AnyMessageName is provided for less common use cases like filtering a
// sequence of Any messages based on a set of allowed message type names.
func AnyMessageName(any *any.Any) (string, error) {
if any == nil {
return "", fmt.Errorf("message is nil")
}
slash := strings.LastIndex(any.TypeUrl, "/")
if slash < 0 {
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
}
return any.TypeUrl[slash+1:], nil
}
// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
func MarshalAny(pb proto.Message) (*any.Any, error) {
value, err := proto.Marshal(pb)
if err != nil {
return nil, err
}
return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
}
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in a google.protobuf.Any
// message. The allocated message is stored in the embedded proto.Message.
//
// Example:
//
// var x ptypes.DynamicAny
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
// fmt.Printf("unmarshaled message: %v", x.Message)
type DynamicAny struct {
proto.Message
}
// Empty returns a new proto.Message of the type specified in a
// google.protobuf.Any message. It returns an error if corresponding message
// type isn't linked in.
func Empty(any *any.Any) (proto.Message, error) {
aname, err := AnyMessageName(any)
if err != nil {
return nil, err
}
t := proto.MessageType(aname)
if t == nil {
return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
}
return reflect.New(t.Elem()).Interface().(proto.Message), nil
}
// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
// message and places the decoded result in pb. It returns an error if type of
// contents of Any message does not match type of pb message.
//
// pb can be a proto.Message, or a *DynamicAny.
func UnmarshalAny(any *any.Any, pb proto.Message) error {
if d, ok := pb.(*DynamicAny); ok {
if d.Message == nil {
var err error
d.Message, err = Empty(any)
if err != nil {
return err
}
}
return UnmarshalAny(any, d.Message)
}
aname, err := AnyMessageName(any)
if err != nil {
return err
}
mname := proto.MessageName(pb)
if aname != mname {
return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
}
return proto.Unmarshal(any.Value, pb)
}
// Is returns true if any value contains a given message type.
func Is(any *any.Any, pb proto.Message) bool {
aname, err := AnyMessageName(any)
if err != nil {
return false
}
return aname == proto.MessageName(pb)
}
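As a quick illustration (not part of the vendored file), here is a hedged sketch of how the helpers above are typically used together, assuming the Duration well-known type that this commit also vendors:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a Duration into an Any; the type URL becomes
	// "type.googleapis.com/google.protobuf.Duration".
	a, err := ptypes.MarshalAny(&durpb.Duration{Seconds: 3})
	if err != nil {
		panic(err)
	}

	// Check the contained type, then unpack it back into a concrete message.
	if ptypes.Is(a, &durpb.Duration{}) {
		var d durpb.Duration
		if err := ptypes.UnmarshalAny(a, &d); err != nil {
			panic(err)
		}
		fmt.Println(d.Seconds) // 3
	}
}
```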

168
vendor/github.com/golang/protobuf/ptypes/any/any.pb.go generated vendored Normal file

@@ -0,0 +1,168 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/any/any.proto
/*
Package any is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/any/any.proto
It has these top-level messages:
Any
*/
package any
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
//
type Any struct {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
func (m *Any) Reset() { *m = Any{} }
func (m *Any) String() string { return proto.CompactTextString(m) }
func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Any) XXX_WellKnownType() string { return "Any" }
func (m *Any) GetTypeUrl() string {
if m != nil {
return m.TypeUrl
}
return ""
}
func (m *Any) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 184 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc,
0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c,
0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69,
0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24,
0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1,
0x38, 0xe5, 0x73, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19,
0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x45, 0x4c, 0xcc, 0xee, 0x01, 0x4e, 0xab,
0x98, 0xe4, 0xdc, 0x21, 0x46, 0x05, 0x40, 0x95, 0xe8, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5,
0x97, 0xe7, 0x85, 0x80, 0x94, 0x26, 0xb1, 0x81, 0xf5, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff,
0x45, 0x1f, 0x1a, 0xf2, 0xf3, 0x00, 0x00, 0x00,
}

139
vendor/github.com/golang/protobuf/ptypes/any/any.proto generated vendored Normal file

@@ -0,0 +1,139 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option go_package = "github.com/golang/protobuf/ptypes/any";
option java_package = "com.google.protobuf";
option java_outer_classname = "AnyProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
//
message Any {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
string type_url = 1;
// Must be a valid serialized protocol buffer of the above specified type.
bytes value = 2;
}

35
vendor/github.com/golang/protobuf/ptypes/doc.go generated vendored Normal file

@@ -0,0 +1,35 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package ptypes contains code for interacting with well-known types.
*/
package ptypes

102
vendor/github.com/golang/protobuf/ptypes/duration.go generated vendored Normal file

@@ -0,0 +1,102 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
// This file implements conversions between google.protobuf.Duration
// and time.Duration.
import (
"errors"
"fmt"
"time"
durpb "github.com/golang/protobuf/ptypes/duration"
)
const (
// Range of a durpb.Duration in seconds, as specified in
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
minSeconds = -maxSeconds
)
// validateDuration determines whether the durpb.Duration is valid according to the
// definition in google/protobuf/duration.proto. A valid durpb.Duration
// may still be too large to fit into a time.Duration (the range of durpb.Duration
// is about 10,000 years, and the range of time.Duration is about 290 years).
func validateDuration(d *durpb.Duration) error {
if d == nil {
return errors.New("duration: nil Duration")
}
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
return fmt.Errorf("duration: %v: seconds out of range", d)
}
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
return fmt.Errorf("duration: %v: nanos out of range", d)
}
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
}
return nil
}
// Duration converts a durpb.Duration to a time.Duration. Duration
// returns an error if the durpb.Duration is invalid or is too large to be
// represented in a time.Duration.
func Duration(p *durpb.Duration) (time.Duration, error) {
if err := validateDuration(p); err != nil {
return 0, err
}
d := time.Duration(p.Seconds) * time.Second
if int64(d/time.Second) != p.Seconds {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
if p.Nanos != 0 {
d += time.Duration(p.Nanos)
if (d < 0) != (p.Nanos < 0) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
}
return d, nil
}
// DurationProto converts a time.Duration to a durpb.Duration.
func DurationProto(d time.Duration) *durpb.Duration {
nanos := d.Nanoseconds()
secs := nanos / 1e9
nanos -= secs * 1e9
return &durpb.Duration{
Seconds: secs,
Nanos: int32(nanos),
}
}
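A small usage sketch (not part of the vendored file) for the two conversion functions above:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Duration -> google.protobuf.Duration and back again.
	p := ptypes.DurationProto(90 * time.Second)
	d, err := ptypes.Duration(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 1m30s
}
```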

146
vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go generated vendored Normal file

@@ -0,0 +1,146 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/duration/duration.proto
/*
Package duration is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/duration/duration.proto
It has these top-level messages:
Duration
*/
package duration
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// # Examples
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
// Timestamp start = ...;
// Timestamp end = ...;
// Duration duration = ...;
//
// duration.seconds = end.seconds - start.seconds;
// duration.nanos = end.nanos - start.nanos;
//
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (duration.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
// Timestamp start = ...;
// Duration duration = ...;
// Timestamp end = ...;
//
// end.seconds = start.seconds + duration.seconds;
// end.nanos = start.nanos + duration.nanos;
//
// if (end.nanos < 0) {
// end.seconds -= 1;
// end.nanos += 1000000000;
// } else if (end.nanos >= 1000000000) {
// end.seconds += 1;
// end.nanos -= 1000000000;
// }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
// td = datetime.timedelta(days=3, minutes=10)
// duration = Duration()
// duration.FromTimedelta(td)
//
// # JSON Mapping
//
// In JSON format, the Duration type is encoded as a string rather than an
// object, where the string ends in the suffix "s" (indicating seconds) and
// is preceded by the number of seconds, with nanoseconds expressed as
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
//
type Duration struct {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}
func (m *Duration) Reset() { *m = Duration{} }
func (m *Duration) String() string { return proto.CompactTextString(m) }
func (*Duration) ProtoMessage() {}
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Duration) XXX_WellKnownType() string { return "Duration" }
func (m *Duration) GetSeconds() int64 {
if m != nil {
return m.Seconds
}
return 0
}
func (m *Duration) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
func init() {
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
}
func init() {
proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0)
}
var fileDescriptor0 = []byte{
// 189 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29,
0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3,
0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8,
0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60,
0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6,
0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98,
0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xfb, 0x83, 0x91, 0x71, 0x11, 0x13,
0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9,
0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01,
0x01, 0x00, 0x00, 0xff, 0xff, 0x45, 0x5a, 0x81, 0x3d, 0x0e, 0x01, 0x00, 0x00,
}

117
vendor/github.com/golang/protobuf/ptypes/duration/duration.proto generated vendored Normal file

@@ -0,0 +1,117 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/duration";
option java_package = "com.google.protobuf";
option java_outer_classname = "DurationProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// # Examples
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
// Timestamp start = ...;
// Timestamp end = ...;
// Duration duration = ...;
//
// duration.seconds = end.seconds - start.seconds;
// duration.nanos = end.nanos - start.nanos;
//
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (duration.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
// Timestamp start = ...;
// Duration duration = ...;
// Timestamp end = ...;
//
// end.seconds = start.seconds + duration.seconds;
// end.nanos = start.nanos + duration.nanos;
//
// if (end.nanos < 0) {
// end.seconds -= 1;
// end.nanos += 1000000000;
// } else if (end.nanos >= 1000000000) {
// end.seconds += 1;
// end.nanos -= 1000000000;
// }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
// td = datetime.timedelta(days=3, minutes=10)
// duration = Duration()
// duration.FromTimedelta(td)
//
// # JSON Mapping
//
// In JSON format, the Duration type is encoded as a string rather than an
// object, where the string ends in the suffix "s" (indicating seconds) and
// is preceded by the number of seconds, with nanoseconds expressed as
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
//
message Duration {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
int64 seconds = 1;
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
int32 nanos = 2;
}

66
vendor/github.com/golang/protobuf/ptypes/regen.sh generated vendored Executable file

@@ -0,0 +1,66 @@
#!/bin/bash -e
#
# This script fetches and rebuilds the "well-known types" protocol buffers.
# To run this you will need protoc and goprotobuf installed;
# see https://github.com/golang/protobuf for instructions.
# You also need Go and Git installed.
PKG=github.com/golang/protobuf/ptypes
UPSTREAM=https://github.com/google/protobuf
UPSTREAM_SUBDIR=src/google/protobuf
PROTO_FILES='
any.proto
duration.proto
empty.proto
struct.proto
timestamp.proto
wrappers.proto
'
function die() {
echo 1>&2 $*
exit 1
}
# Sanity check that the right tools are accessible.
for tool in go git protoc protoc-gen-go; do
q=$(which $tool) || die "didn't find $tool"
echo 1>&2 "$tool: $q"
done
tmpdir=$(mktemp -d -t regen-wkt.XXXXXX)
trap 'rm -rf $tmpdir' EXIT
echo -n 1>&2 "finding package dir... "
pkgdir=$(go list -f '{{.Dir}}' $PKG)
echo 1>&2 $pkgdir
base=$(echo $pkgdir | sed "s,/$PKG\$,,")
echo 1>&2 "base: $base"
cd $base
echo 1>&2 "fetching latest protos... "
git clone -q $UPSTREAM $tmpdir
# Pass 1: build mapping from upstream filename to our filename.
declare -A filename_map
for f in $(cd $PKG && find * -name '*.proto'); do
echo -n 1>&2 "looking for latest version of $f... "
up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f) | grep -v /testdata/)
echo 1>&2 $up
if [ $(echo $up | wc -w) != "1" ]; then
die "not exactly one match"
fi
filename_map[$up]=$f
done
# Pass 2: copy files
for up in "${!filename_map[@]}"; do
f=${filename_map[$up]}
shortname=$(basename $f | sed 's,\.proto$,,')
cp $tmpdir/$UPSTREAM_SUBDIR/$up $PKG/$f
done
# Run protoc once per package.
for dir in $(find $PKG -name '*.proto' | xargs dirname | sort | uniq); do
echo 1>&2 "* $dir"
protoc --go_out=. $dir/*.proto
done
echo 1>&2 "All OK"

134
vendor/github.com/golang/protobuf/ptypes/timestamp.go generated vendored Normal file

@@ -0,0 +1,134 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
// This file implements operations on google.protobuf.Timestamp.
import (
"errors"
"fmt"
"time"
tspb "github.com/golang/protobuf/ptypes/timestamp"
)
const (
// Seconds field of the earliest valid Timestamp.
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
minValidSeconds = -62135596800
// Seconds field just after the latest valid Timestamp.
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
maxValidSeconds = 253402300800
)
// validateTimestamp determines whether a Timestamp is valid.
// A valid timestamp represents a time in the range
// [0001-01-01, 10000-01-01) and has a Nanos field
// in the range [0, 1e9).
//
// If the Timestamp is valid, validateTimestamp returns nil.
// Otherwise, it returns an error that describes
// the problem.
//
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
func validateTimestamp(ts *tspb.Timestamp) error {
if ts == nil {
return errors.New("timestamp: nil Timestamp")
}
if ts.Seconds < minValidSeconds {
return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
}
if ts.Seconds >= maxValidSeconds {
return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
}
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
}
return nil
}
// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
// It returns an error if the argument is invalid.
//
// Unlike most Go functions, if Timestamp returns an error, the first return value
// is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
// Don't return the zero value on error, because it corresponds to a valid
// timestamp. Instead return whatever time.Unix gives us.
var t time.Time
if ts == nil {
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
} else {
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
}
return t, validateTimestamp(ts)
}
// TimestampNow returns a google.protobuf.Timestamp for the current time.
func TimestampNow() *tspb.Timestamp {
ts, err := TimestampProto(time.Now())
if err != nil {
panic("ptypes: time.Now() out of Timestamp range")
}
return ts
}
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
seconds := t.Unix()
nanos := int32(t.Sub(time.Unix(seconds, 0)))
ts := &tspb.Timestamp{
Seconds: seconds,
Nanos: nanos,
}
if err := validateTimestamp(ts); err != nil {
return nil, err
}
return ts, nil
}
// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
// Timestamps, it returns an error message in parentheses.
func TimestampString(ts *tspb.Timestamp) string {
t, err := Timestamp(ts)
if err != nil {
return fmt.Sprintf("(%v)", err)
}
return t.Format(time.RFC3339Nano)
}
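Similarly, a brief sketch (not part of the vendored file) of converting between time.Time and google.protobuf.Timestamp with the functions above:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> google.protobuf.Timestamp and back.
	ts, err := ptypes.TimestampProto(time.Date(2017, 8, 11, 10, 19, 46, 0, time.UTC))
	if err != nil {
		panic(err)
	}
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Format(time.RFC3339)) // 2017-08-11T10:19:46Z
	fmt.Println(ptypes.TimestampString(ts))
}
```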

162
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go generated vendored Normal file

@@ -0,0 +1,162 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
/*
Package timestamp is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
It has these top-level messages:
Timestamp
*/
package timestamp
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// A Timestamp represents a point in time independent of any time zone
// or calendar, represented as seconds and fractions of seconds at
// nanosecond resolution in UTC Epoch time. It is encoded using the
// Proleptic Gregorian Calendar which extends the Gregorian calendar
// backwards to year one. It is encoded assuming all minutes are 60
// seconds long, i.e. leap seconds are "smeared" so that no leap second
// table is needed for interpretation. Range is from
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
// By restricting to that range, we ensure that we can convert to
// and from RFC 3339 date strings.
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
//
// # Examples
//
// Example 1: Compute Timestamp from POSIX `time()`.
//
// Timestamp timestamp;
// timestamp.set_seconds(time(NULL));
// timestamp.set_nanos(0);
//
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
//
// struct timeval tv;
// gettimeofday(&tv, NULL);
//
// Timestamp timestamp;
// timestamp.set_seconds(tv.tv_sec);
// timestamp.set_nanos(tv.tv_usec * 1000);
//
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
//
// FILETIME ft;
// GetSystemTimeAsFileTime(&ft);
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
//
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
// Timestamp timestamp;
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
//
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
//
// long millis = System.currentTimeMillis();
//
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
// .setNanos((int) ((millis % 1000) * 1000000)).build();
//
//
// Example 5: Compute Timestamp from current time in Python.
//
// timestamp = Timestamp()
// timestamp.GetCurrentTime()
//
// # JSON Mapping
//
// In JSON format, the Timestamp type is encoded as a string in the
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
// where {year} is always expressed using four digits while {month}, {day},
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required, though only UTC (as indicated by "Z") is presently supported.
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
// method. In Python, a standard `datetime.datetime` object can be converted
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
// to obtain a formatter capable of generating timestamps in this format.
//
//
type Timestamp struct {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}
func (m *Timestamp) Reset() { *m = Timestamp{} }
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
func (m *Timestamp) GetSeconds() int64 {
if m != nil {
return m.Seconds
}
return 0
}
func (m *Timestamp) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
func init() {
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
}
func init() {
proto.RegisterFile("github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", fileDescriptor0)
}
var fileDescriptor0 = []byte{
// 190 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9,
0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3,
0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24,
0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83,
0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d,
0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x8e, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x4c, 0x27, 0x3e,
0xb8, 0x89, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0xdc, 0xfc, 0x83, 0x91, 0x71, 0x11,
0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, 0xe1,
0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, 0x8c,
0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x59, 0x0a, 0x4d, 0x13, 0x01, 0x00, 0x00,
}

133
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto generated vendored Normal file

@@ -0,0 +1,133 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/timestamp";
option java_package = "com.google.protobuf";
option java_outer_classname = "TimestampProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// A Timestamp represents a point in time independent of any time zone
// or calendar, represented as seconds and fractions of seconds at
// nanosecond resolution in UTC Epoch time. It is encoded using the
// Proleptic Gregorian Calendar which extends the Gregorian calendar
// backwards to year one. It is encoded assuming all minutes are 60
// seconds long, i.e. leap seconds are "smeared" so that no leap second
// table is needed for interpretation. Range is from
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
// By restricting to that range, we ensure that we can convert to
// and from RFC 3339 date strings.
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
//
// # Examples
//
// Example 1: Compute Timestamp from POSIX `time()`.
//
// Timestamp timestamp;
// timestamp.set_seconds(time(NULL));
// timestamp.set_nanos(0);
//
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
//
// struct timeval tv;
// gettimeofday(&tv, NULL);
//
// Timestamp timestamp;
// timestamp.set_seconds(tv.tv_sec);
// timestamp.set_nanos(tv.tv_usec * 1000);
//
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
//
// FILETIME ft;
// GetSystemTimeAsFileTime(&ft);
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
//
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
// Timestamp timestamp;
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
//
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
//
// long millis = System.currentTimeMillis();
//
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
// .setNanos((int) ((millis % 1000) * 1000000)).build();
//
//
// Example 5: Compute Timestamp from current time in Python.
//
// timestamp = Timestamp()
// timestamp.GetCurrentTime()
//
// # JSON Mapping
//
// In JSON format, the Timestamp type is encoded as a string in the
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
// where {year} is always expressed using four digits while {month}, {day},
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required, though only UTC (as indicated by "Z") is presently supported.
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
// method. In Python, a standard `datetime.datetime` object can be converted
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
// to obtain a formatter capable of generating timestamps in this format.
//
//
message Timestamp {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
int64 seconds = 1;
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
int32 nanos = 2;
}

21
vendor/github.com/hashicorp/go-hclog/LICENSE generated vendored Normal file

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017 HashiCorp
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

123
vendor/github.com/hashicorp/go-hclog/README.md generated vendored Normal file

@@ -0,0 +1,123 @@
# go-hclog
[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
[godocs]: https://godoc.org/github.com/hashicorp/go-hclog
`go-hclog` is a package for Go that provides a simple key/value logging
interface for use in development and production environments.
It provides logging levels that provide decreased output based upon the
desired amount of output, unlike the standard library `log` package.
It does not provide `Printf` style logging, only key/value logging that is
exposed as arguments to the logging functions for simplicity.
It provides a human readable output mode for use in development as well as
JSON output mode for production.
## Stability Note
While this library is fully open source and HashiCorp will be maintaining it
(since we are and will be making extensive use of it), the API and output
format is subject to minor changes as we fully bake and vet it in our projects.
This notice will be removed once it's fully integrated into our major projects
and no further changes are anticipated.
## Installation and Docs
Install using `go get github.com/hashicorp/go-hclog`.
Full documentation is available at
http://godoc.org/github.com/hashicorp/go-hclog
## Usage
### Use the global logger
```go
hclog.Default().Info("hello world")
```
```text
2017-07-05T16:15:55.167-0700 [INFO ] hello world
```
(Note timestamps are removed in future examples for brevity.)
### Create a new logger
```go
appLogger := hclog.New(&hclog.LoggerOptions{
Name: "my-app",
Level: hclog.LevelFromString("DEBUG"),
})
```
### Emit an Info level message with 2 key/value pairs
```go
input := "5.5"
_, err := strconv.ParseInt(input, 10, 32)
if err != nil {
appLogger.Info("Invalid input for ParseInt", "input", input, "error", err)
}
```
```text
... [INFO ] my-app: Invalid input for ParseInt: input=5.5 error="strconv.ParseInt: parsing "5.5": invalid syntax"
```
### Create a new Logger for a major subsystem
```go
subsystemLogger := appLogger.Named("transport")
subsystemLogger.Info("we are transporting something")
```
```text
... [INFO ] my-app.transport: we are transporting something
```
Notice that logs emitted by `subsystemLogger` contain `my-app.transport`,
reflecting both the application and subsystem names.
### Create a new Logger with fixed key/value pairs
Using `With()` will include a specific key-value pair in all messages emitted
by that logger.
```go
requestID := "5fb446b6-6eba-821d-df1b-cd7501b6a363"
requestLogger := subsystemLogger.With("request", requestID)
requestLogger.Info("we are transporting a request")
```
```text
... [INFO ] my-app.transport: we are transporting a request: request=5fb446b6-6eba-821d-df1b-cd7501b6a363
```
This allows sub Loggers to be context specific without having to thread that
into all the callers.
### Use this with code that uses the standard library logger
If you want to use the standard library's `log.Logger` interface you can wrap
`hclog.Logger` by calling the `StandardLogger()` method. This allows you to use
it with the familiar `Println()`, `Printf()`, etc. For example:
```go
stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
InferLevels: true,
})
// Printf() is provided by stdlib log.Logger interface, not hclog.Logger
stdLogger.Printf("[DEBUG] %+v", stdLogger)
```
```text
... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]}
```
Notice that if `appLogger` is initialized with the `INFO` log level _and_ you
specify `InferLevels: true`, you will not see any output here. You must change
`appLogger` to `DEBUG` to see output. See the docs for more information.

34
vendor/github.com/hashicorp/go-hclog/global.go generated vendored Normal file

@@ -0,0 +1,34 @@
package hclog
import (
"sync"
)
var (
protect sync.Once
def Logger
// The options used to create the Default logger. These are
// read only when the Default logger is created, so set them
// as soon as the process starts.
DefaultOptions = &LoggerOptions{
Level: DefaultLevel,
Output: DefaultOutput,
}
)
// Return a logger that is held globally. This can be a good starting
// place, and then you can use .With() and .Named() to create sub-loggers
// to be used in more specific contexts.
func Default() Logger {
protect.Do(func() {
def = New(DefaultOptions)
})
return def
}
// A short alias for Default()
func L() Logger {
return Default()
}
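A short usage sketch (the option values are illustrative): because `DefaultOptions` is only read the first time the global logger is constructed, it should be adjusted before the first call to `Default()` or `L()`.

```go
package main

import (
	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	// Adjust the global defaults before the first Default()/L() call;
	// later changes have no effect because of the sync.Once guard above.
	hclog.DefaultOptions.Level = hclog.Debug
	hclog.DefaultOptions.JSONFormat = true

	hclog.L().Debug("starting up", "pid", 1234)
}
```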

vendor/github.com/hashicorp/go-hclog/int.go generated vendored Normal file

@ -0,0 +1,385 @@
package hclog
import (
"bufio"
"encoding/json"
"fmt"
"log"
"os"
"runtime"
"strconv"
"strings"
"sync"
"time"
)
var (
_levelToBracket = map[Level]string{
Debug: "[DEBUG]",
Trace: "[TRACE]",
Info: "[INFO ]",
Warn: "[WARN ]",
Error: "[ERROR]",
}
)
// Given the options (nil for defaults), create a new Logger
func New(opts *LoggerOptions) Logger {
if opts == nil {
opts = &LoggerOptions{}
}
output := opts.Output
if output == nil {
output = os.Stderr
}
level := opts.Level
if level == NoLevel {
level = DefaultLevel
}
return &intLogger{
m: new(sync.Mutex),
json: opts.JSONFormat,
caller: opts.IncludeLocation,
name: opts.Name,
w: bufio.NewWriter(output),
level: level,
}
}
// The internal logger implementation. Internal in that it is defined entirely
// by this package.
type intLogger struct {
json bool
caller bool
name string
// this is a pointer so that it's shared by any derived loggers, since
// those derived loggers share the bufio.Writer as well.
m *sync.Mutex
w *bufio.Writer
level Level
implied []interface{}
}
// Make sure that intLogger is a Logger
var _ Logger = &intLogger{}
// The time format to use for logging. This is a version of RFC3339 that
// contains millisecond precision
const TimeFormat = "2006-01-02T15:04:05.000Z0700"
// Log a message and a set of key/value pairs if the given level is at
// or more severe than the threshold configured in the Logger.
func (z *intLogger) Log(level Level, msg string, args ...interface{}) {
if level < z.level {
return
}
t := time.Now()
z.m.Lock()
defer z.m.Unlock()
if z.json {
z.logJson(t, level, msg, args...)
} else {
z.log(t, level, msg, args...)
}
z.w.Flush()
}
// Cleanup a path by returning the last 2 segments of the path only.
func trimCallerPath(path string) string {
// lovely borrowed from zap
// nb. To make sure we trim the path correctly on Windows too, we
// counter-intuitively need to use '/' and *not* os.PathSeparator here,
// because the path given originates from Go stdlib, specifically
// runtime.Caller() which (as of Mar/17) returns forward slashes even on
// Windows.
//
// See https://github.com/golang/go/issues/3335
// and https://github.com/golang/go/issues/18151
//
// for discussion on the issue on Go side.
//
// Find the last separator.
//
idx := strings.LastIndexByte(path, '/')
if idx == -1 {
return path
}
// Find the penultimate separator.
idx = strings.LastIndexByte(path[:idx], '/')
if idx == -1 {
return path
}
return path[idx+1:]
}
// Non-JSON logging format function
func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) {
z.w.WriteString(t.Format(TimeFormat))
z.w.WriteByte(' ')
s, ok := _levelToBracket[level]
if ok {
z.w.WriteString(s)
} else {
z.w.WriteString("[UNKN ]")
}
if z.caller {
if _, file, line, ok := runtime.Caller(3); ok {
z.w.WriteByte(' ')
z.w.WriteString(trimCallerPath(file))
z.w.WriteByte(':')
z.w.WriteString(strconv.Itoa(line))
z.w.WriteByte(':')
}
}
z.w.WriteByte(' ')
if z.name != "" {
z.w.WriteString(z.name)
z.w.WriteString(": ")
}
z.w.WriteString(msg)
args = append(z.implied, args...)
var stacktrace CapturedStacktrace
if args != nil && len(args) > 0 {
if len(args)%2 != 0 {
cs, ok := args[len(args)-1].(CapturedStacktrace)
if ok {
args = args[:len(args)-1]
stacktrace = cs
} else {
args = append(args, "<unknown>")
}
}
z.w.WriteByte(':')
FOR:
for i := 0; i < len(args); i = i + 2 {
var val string
switch st := args[i+1].(type) {
case string:
val = st
case int:
val = strconv.FormatInt(int64(st), 10)
case int64:
val = strconv.FormatInt(int64(st), 10)
case int32:
val = strconv.FormatInt(int64(st), 10)
case int16:
val = strconv.FormatInt(int64(st), 10)
case int8:
val = strconv.FormatInt(int64(st), 10)
case uint:
val = strconv.FormatUint(uint64(st), 10)
case uint64:
val = strconv.FormatUint(uint64(st), 10)
case uint32:
val = strconv.FormatUint(uint64(st), 10)
case uint16:
val = strconv.FormatUint(uint64(st), 10)
case uint8:
val = strconv.FormatUint(uint64(st), 10)
case CapturedStacktrace:
stacktrace = st
continue FOR
default:
val = fmt.Sprintf("%v", st)
}
z.w.WriteByte(' ')
z.w.WriteString(args[i].(string))
z.w.WriteByte('=')
if strings.ContainsAny(val, " \t\n\r") {
z.w.WriteByte('"')
z.w.WriteString(val)
z.w.WriteByte('"')
} else {
z.w.WriteString(val)
}
}
}
z.w.WriteString("\n")
if stacktrace != "" {
z.w.WriteString(string(stacktrace))
}
}
// JSON logging function
func (z *intLogger) logJson(t time.Time, level Level, msg string, args ...interface{}) {
vals := map[string]interface{}{
"@message": msg,
"@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"),
}
var levelStr string
switch level {
case Error:
levelStr = "error"
case Warn:
levelStr = "warn"
case Info:
levelStr = "info"
case Debug:
levelStr = "debug"
case Trace:
levelStr = "trace"
default:
levelStr = "all"
}
vals["@level"] = levelStr
if z.name != "" {
vals["@module"] = z.name
}
if z.caller {
if _, file, line, ok := runtime.Caller(3); ok {
vals["@caller"] = fmt.Sprintf("%s:%d", file, line)
}
}
if args != nil && len(args) > 0 {
if len(args)%2 != 0 {
cs, ok := args[len(args)-1].(CapturedStacktrace)
if ok {
args = args[:len(args)-1]
vals["stacktrace"] = cs
} else {
args = append(args, "<unknown>")
}
}
for i := 0; i < len(args); i = i + 2 {
if _, ok := args[i].(string); !ok {
// As this is the logging function not much we can do here
// without injecting into logs...
continue
}
vals[args[i].(string)] = args[i+1]
}
}
err := json.NewEncoder(z.w).Encode(vals)
if err != nil {
panic(err)
}
}
// Emit the message and args at DEBUG level
func (z *intLogger) Debug(msg string, args ...interface{}) {
z.Log(Debug, msg, args...)
}
// Emit the message and args at TRACE level
func (z *intLogger) Trace(msg string, args ...interface{}) {
z.Log(Trace, msg, args...)
}
// Emit the message and args at INFO level
func (z *intLogger) Info(msg string, args ...interface{}) {
z.Log(Info, msg, args...)
}
// Emit the message and args at WARN level
func (z *intLogger) Warn(msg string, args ...interface{}) {
z.Log(Warn, msg, args...)
}
// Emit the message and args at ERROR level
func (z *intLogger) Error(msg string, args ...interface{}) {
z.Log(Error, msg, args...)
}
// Indicate that the logger would emit TRACE level logs
func (z *intLogger) IsTrace() bool {
return z.level == Trace
}
// Indicate that the logger would emit DEBUG level logs
func (z *intLogger) IsDebug() bool {
return z.level <= Debug
}
// Indicate that the logger would emit INFO level logs
func (z *intLogger) IsInfo() bool {
return z.level <= Info
}
// Indicate that the logger would emit WARN level logs
func (z *intLogger) IsWarn() bool {
return z.level <= Warn
}
// Indicate that the logger would emit ERROR level logs
func (z *intLogger) IsError() bool {
return z.level <= Error
}
// Return a sub-Logger for which every emitted log message will contain
// the given key/value pairs. This is used to create a context specific
// Logger.
func (z *intLogger) With(args ...interface{}) Logger {
var nz intLogger = *z
nz.implied = append(nz.implied, args...)
return &nz
}
// Create a new sub-Logger that has a name descending from the current name.
// This is used to create a subsystem specific Logger.
func (z *intLogger) Named(name string) Logger {
var nz intLogger = *z
if nz.name != "" {
nz.name = nz.name + "." + name
}
return &nz
}
// Create a new sub-Logger with an explicit name. This ignores the current
// name. This is used to create a standalone logger that doesn't fall
// within the normal hierarchy.
func (z *intLogger) ResetNamed(name string) Logger {
var nz intLogger = *z
nz.name = name
return &nz
}
// Create a *log.Logger that will send its data through this Logger. This
// allows packages that expect to be using the standard library log to actually
// use this logger.
func (z *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
if opts == nil {
opts = &StandardLoggerOptions{}
}
return log.New(&stdlogAdapter{z, opts.InferLevels}, "", 0)
}
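The README above mentions a JSON output mode for production. As a brief sketch (the logger name and key/value pairs are made up), enabling it only requires setting `JSONFormat` in the options, which routes messages through `logJson` above:

```go
jsonLogger := hclog.New(&hclog.LoggerOptions{
	Name:       "my-app",
	Level:      hclog.Info,
	JSONFormat: true,
})

// Emits one JSON object per line containing @timestamp, @level, @module,
// @message and the supplied key/value pairs.
jsonLogger.Info("request served", "method", "GET", "path", "/health")
```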

vendor/github.com/hashicorp/go-hclog/log.go generated vendored Normal file

@ -0,0 +1,138 @@
package hclog
import (
"io"
"log"
"os"
"strings"
)
var (
DefaultOutput = os.Stderr
DefaultLevel = Info
)
type Level int
const (
// This is a special level used to indicate that no level has been
// set and allow for a default to be used.
NoLevel Level = 0
// The most verbose level. Intended to be used for the tracing of actions
// in code, such as function enters/exits, etc.
Trace Level = 1
// For low-level analysis by the programmer.
Debug Level = 2
// For information about steady state operations.
Info Level = 3
// For information about rare but handled events.
Warn Level = 4
// For information about unrecoverable events.
Error Level = 5
)
// LevelFromString returns a Level type for the named log level, or "NoLevel" if
// the level string is invalid. This facilitates setting the log level via
// config or environment variable by name in a predictable way.
func LevelFromString(levelStr string) Level {
// We don't care about case. Accept "INFO" or "info"
levelStr = strings.ToLower(strings.TrimSpace(levelStr))
switch levelStr {
case "trace":
return Trace
case "debug":
return Debug
case "info":
return Info
case "warn":
return Warn
case "error":
return Error
default:
return NoLevel
}
}
// The main Logger interface. All code should program against this interface only.
type Logger interface {
// Args are alternating key, val pairs
// keys must be strings
// vals can be any type, but display is implementation specific
// Emit a message and key/value pairs at the TRACE level
Trace(msg string, args ...interface{})
// Emit a message and key/value pairs at the DEBUG level
Debug(msg string, args ...interface{})
// Emit a message and key/value pairs at the INFO level
Info(msg string, args ...interface{})
// Emit a message and key/value pairs at the WARN level
Warn(msg string, args ...interface{})
// Emit a message and key/value pairs at the ERROR level
Error(msg string, args ...interface{})
// Indicate if TRACE logs would be emitted. This and the other Is* guards
// are used to elide expensive logging code based on the current level.
IsTrace() bool
// Indicate if DEBUG logs would be emitted. This and the other Is* guards
IsDebug() bool
// Indicate if INFO logs would be emitted. This and the other Is* guards
IsInfo() bool
// Indicate if WARN logs would be emitted. This and the other Is* guards
IsWarn() bool
// Indicate if ERROR logs would be emitted. This and the other Is* guards
IsError() bool
// Creates a sublogger that will always have the given key/value pairs
With(args ...interface{}) Logger
// Create a logger that will prepend the name string on the front of all messages.
// If the logger already has a name, the new value will be appended to the current
// name. That way, a major subsystem can use this to decorate all its own logs
// without losing context.
Named(name string) Logger
// Create a logger that will prepend the name string on the front of all messages.
// This sets the name of the logger to the value directly, unlike Named which honors
// the current name as well.
ResetNamed(name string) Logger
// Return a value that conforms to the stdlib log.Logger interface
StandardLogger(opts *StandardLoggerOptions) *log.Logger
}
type StandardLoggerOptions struct {
// Indicate that some minimal parsing should be done on strings to try
// and detect their level and re-emit them.
// This supports strings like [ERROR], [ERR], [TRACE], [WARN], [INFO], and
// [DEBUG], stripping the prefix before re-emitting at the detected level.
InferLevels bool
}
type LoggerOptions struct {
// Name of the subsystem to prefix logs with
Name string
// The threshold for the logger. Anything less severe is suppressed
Level Level
// Where to write the logs to. Defaults to os.Stderr if nil
Output io.Writer
// Control if the output should be in JSON.
JSONFormat bool
// Include file and line information in each log line
IncludeLocation bool
}
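Because `LevelFromString` returns `NoLevel` for unrecognized input, and `New` then falls back to `DefaultLevel`, it pairs naturally with configuration from the environment. A small sketch, assuming the `os` and `hclog` packages are imported and with the environment variable name chosen only for illustration:

```go
level := hclog.LevelFromString(os.Getenv("MYAPP_LOG_LEVEL"))

logger := hclog.New(&hclog.LoggerOptions{
	Name:  "my-app",
	Level: level, // NoLevel here means New() falls back to DefaultLevel (Info)
})

logger.Info("logger configured", "level", os.Getenv("MYAPP_LOG_LEVEL"))
```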

vendor/github.com/hashicorp/go-hclog/stacktrace.go generated vendored Normal file

@ -0,0 +1,108 @@
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package hclog
import (
"bytes"
"runtime"
"strconv"
"strings"
"sync"
)
var (
_stacktraceIgnorePrefixes = []string{
"runtime.goexit",
"runtime.main",
}
_stacktracePool = sync.Pool{
New: func() interface{} {
return newProgramCounters(64)
},
}
)
// A stacktrace gathered by a previous call to Stacktrace. If passed
// to a logging function, the stacktrace will be appended.
type CapturedStacktrace string
// Gather a stacktrace of the current goroutine and return it to be passed
// to a logging function.
func Stacktrace() CapturedStacktrace {
return CapturedStacktrace(takeStacktrace())
}
func takeStacktrace() string {
programCounters := _stacktracePool.Get().(*programCounters)
defer _stacktracePool.Put(programCounters)
var buffer bytes.Buffer
for {
// Skip the call to runtime.Counters and takeStacktrace so that the
// program counters start at the caller of takeStacktrace.
n := runtime.Callers(2, programCounters.pcs)
if n < cap(programCounters.pcs) {
programCounters.pcs = programCounters.pcs[:n]
break
}
// Don't put the too-short counter slice back into the pool; this lets
// the pool adjust if we consistently take deep stacktraces.
programCounters = newProgramCounters(len(programCounters.pcs) * 2)
}
i := 0
frames := runtime.CallersFrames(programCounters.pcs)
for frame, more := frames.Next(); more; frame, more = frames.Next() {
if shouldIgnoreStacktraceFunction(frame.Function) {
continue
}
if i != 0 {
buffer.WriteByte('\n')
}
i++
buffer.WriteString(frame.Function)
buffer.WriteByte('\n')
buffer.WriteByte('\t')
buffer.WriteString(frame.File)
buffer.WriteByte(':')
buffer.WriteString(strconv.Itoa(int(frame.Line)))
}
return buffer.String()
}
func shouldIgnoreStacktraceFunction(function string) bool {
for _, prefix := range _stacktraceIgnorePrefixes {
if strings.HasPrefix(function, prefix) {
return true
}
}
return false
}
type programCounters struct {
pcs []uintptr
}
func newProgramCounters(size int) *programCounters {
return &programCounters{make([]uintptr, size)}
}
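As the comment above notes, a `CapturedStacktrace` handed to a logging call is detected by the logger and appended after the message and key/value pairs. A minimal sketch (`doWork` and `logger` are placeholders):

```go
if err := doWork(); err != nil {
	// The trailing CapturedStacktrace value is recognized by the logger
	// and appended to the log line (or added as "stacktrace" in JSON mode).
	logger.Error("work failed", "error", err, hclog.Stacktrace())
}
```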

vendor/github.com/hashicorp/go-hclog/stdlog.go generated vendored Normal file

@ -0,0 +1,62 @@
package hclog
import (
"bytes"
"strings"
)
// Provides an io.Writer to shim the data out of *log.Logger
// and back into our Logger. This is basically the only way to
// build upon *log.Logger.
type stdlogAdapter struct {
hl Logger
inferLevels bool
}
// Take the data, infer the levels if configured, and send it through
// a regular Logger
func (s *stdlogAdapter) Write(data []byte) (int, error) {
str := string(bytes.TrimRight(data, " \t\n"))
if s.inferLevels {
level, str := s.pickLevel(str)
switch level {
case Trace:
s.hl.Trace(str)
case Debug:
s.hl.Debug(str)
case Info:
s.hl.Info(str)
case Warn:
s.hl.Warn(str)
case Error:
s.hl.Error(str)
default:
s.hl.Info(str)
}
} else {
s.hl.Info(str)
}
return len(data), nil
}
// Detect, based on conventions, what log level this is
func (s *stdlogAdapter) pickLevel(str string) (Level, string) {
switch {
case strings.HasPrefix(str, "[DEBUG]"):
return Debug, strings.TrimSpace(str[7:])
case strings.HasPrefix(str, "[TRACE]"):
return Trace, strings.TrimSpace(str[7:])
case strings.HasPrefix(str, "[INFO]"):
return Info, strings.TrimSpace(str[6:])
case strings.HasPrefix(str, "[WARN]"):
return Warn, strings.TrimSpace(str[7:])
case strings.HasPrefix(str, "[ERROR]"):
return Error, strings.TrimSpace(str[7:])
case strings.HasPrefix(str, "[ERR]"):
return Error, strings.TrimSpace(str[5:])
default:
return Info, str
}
}


@@ -1,10 +1,9 @@
 # Go Plugin System over RPC
 
 `go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system
-that has been in use by HashiCorp tooling for over 3 years. While initially
-created for [Packer](https://www.packer.io), it has since been used by
-[Terraform](https://www.terraform.io) and [Otto](https://www.ottoproject.io),
-with plans to also use it for [Nomad](https://www.nomadproject.io) and
+that has been in use by HashiCorp tooling for over 4 years. While initially
+created for [Packer](https://www.packer.io), it is additionally in use by
+[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), and
 [Vault](https://www.vaultproject.io).
 
 While the plugin system is over RPC, it is currently only designed to work
@@ -24,6 +23,11 @@ interface as if it were going to run in the same process. For a plugin user:
 you just use and call functions on an interface as if it were in the same
 process. This plugin system handles the communication in between.
 
+**Cross-language support.** Plugins can be written (and consumed) by
+almost every major language. This library supports serving plugins via
+[gRPC](http://www.grpc.io). gRPC-based plugins enable plugins to be written
+in any language.
+
 **Complex arguments and return values are supported.** This library
 provides APIs for handling complex arguments and return values such
 as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library
@@ -37,7 +41,10 @@ and the plugin can call back into the host process.
 **Built-in Logging.** Any plugins that use the `log` standard library
 will have log data automatically sent to the host process. The host
 process will mirror this output prefixed with the path to the plugin
-binary. This makes debugging with plugins simple.
+binary. This makes debugging with plugins simple. If the host system
+uses [hclog](https://github.com/hashicorp/go-hclog) then the log data
+will be structured. If the plugin also uses hclog, logs from the plugin
+will be sent to the host hclog and be structured.
 
 **Protocol Versioning.** A very basic "protocol version" is supported that
 can be incremented to invalidate any previous plugins. This is useful when
@@ -62,13 +69,18 @@ This requires the host/plugin to know this is possible and daemonize
 properly. `NewClient` takes a `ReattachConfig` to determine if and how to
 reattach.
 
+**Cryptographically Secure Plugins.** Plugins can be verified with an expected
+checksum and RPC communications can be configured to use TLS. The host process
+must be properly secured to protect this configuration.
+
 ## Architecture
 
 The HashiCorp plugin system works by launching subprocesses and communicating
-over RPC (using standard `net/rpc`). A single connection is made between
-any plugin and the host process, and we use a
-[connection multiplexing](https://github.com/hashicorp/yamux)
-library to multiplex any other connections on top.
+over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io). A single
+connection is made between any plugin and the host process. For net/rpc-based
+plugins, we use a [connection multiplexing](https://github.com/hashicorp/yamux)
+library to multiplex any other connections on top. For gRPC-based plugins,
+the HTTP2 protocol handles multiplexing.
 
 This architecture has a number of benefits:
@@ -76,8 +88,8 @@ This architecture has a number of benefits:
 panic the plugin user.
 
 * Plugins are very easy to write: just write a Go application and `go build`.
-Theoretically you could also use another language as long as it can
-communicate the Go `net/rpc` protocol but this hasn't yet been tried.
+Or use any other language to write a gRPC server with a tiny amount of
+boilerplate to support go-plugin.
 
 * Plugins are very easy to install: just put the binary in a location where
 the host will find it (depends on the host but this library also provides
@@ -85,8 +97,8 @@ This architecture has a number of benefits:
 * Plugins can be relatively secure: The plugin only has access to the
 interfaces and args given to it, not to the entire memory space of the
-process. More security features are planned (see the coming soon section
-below).
+process. Additionally, go-plugin can communicate with the plugin over
+TLS.
 
 ## Usage
@@ -97,10 +109,9 @@ high-level steps that must be done. Examples are available in the
 1. Choose the interface(s) you want to expose for plugins.
 
 2. For each interface, implement an implementation of that interface
-that communicates over an `*rpc.Client` (from the standard `net/rpc`
-package) for every function call. Likewise, implement the RPC server
-struct this communicates to which is then communicating to a real,
-concrete implementation.
+that communicates over a `net/rpc` connection or other a
+[gRPC](http://www.grpc.io) connection or both. You'll have to implement
+both a client and server implementation.
 
 3. Create a `Plugin` implementation that knows how to create the RPC
 client/server for a given plugin type.
@@ -125,10 +136,6 @@ improvements we can make.
 
 At this point in time, the roadmap for the plugin system is:
 
-**Cryptographically Secure Plugins.** We'll implement signing plugins
-and loading signed plugins in order to allow Vault to make use of multi-process
-in a secure way.
-
 **Semantic Versioning.** Plugins will be able to implement a semantic version.
 This plugin system will give host processes a system for constraining
 versions. This is in addition to the protocol versioning already present


@ -2,8 +2,11 @@ package plugin
import ( import (
"bufio" "bufio"
"crypto/subtle"
"crypto/tls"
"errors" "errors"
"fmt" "fmt"
"hash"
"io" "io"
"io/ioutil" "io/ioutil"
"log" "log"
@ -17,6 +20,10 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"unicode" "unicode"
"encoding/json"
hclog "github.com/hashicorp/go-hclog"
) )
// If this is 1, then we've called CleanupClients. This can be used // If this is 1, then we've called CleanupClients. This can be used
@ -35,6 +42,22 @@ var (
// ErrProcessNotFound is returned when a client is instantiated to // ErrProcessNotFound is returned when a client is instantiated to
// reattach to an existing process and it isn't found. // reattach to an existing process and it isn't found.
ErrProcessNotFound = errors.New("Reattachment process not found") ErrProcessNotFound = errors.New("Reattachment process not found")
// ErrChecksumsDoNotMatch is returned when the binary's checksum doesn't match
// the one provided in the SecureConfig.
ErrChecksumsDoNotMatch = errors.New("checksums did not match")
// ErrSecureConfigNoChecksum is returned when an empty checksum is provided to the
// SecureConfig.
ErrSecureConfigNoChecksum = errors.New("no checksum provided")
// ErrSecureConfigNoHash is returned when a nil Hash object is provided to the
// SecureConfig.
ErrSecureConfigNoHash = errors.New("no hash implementation provided")
// ErrSecureConfigAndReattach is returned when both Reattach and
// SecureConfig are set.
ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set")
) )
// Client handles the lifecycle of a plugin application. It launches // Client handles the lifecycle of a plugin application. It launches
@ -55,7 +78,9 @@ type Client struct {
l sync.Mutex l sync.Mutex
address net.Addr address net.Addr
process *os.Process process *os.Process
client *RPCClient client ClientProtocol
protocol Protocol
logger hclog.Logger
} }
// ClientConfig is the configuration used to initialize a new // ClientConfig is the configuration used to initialize a new
@ -79,6 +104,13 @@ type ClientConfig struct {
Cmd *exec.Cmd Cmd *exec.Cmd
Reattach *ReattachConfig Reattach *ReattachConfig
// SecureConfig is configuration for verifying the integrity of the
// executable. It can not be used with Reattach.
SecureConfig *SecureConfig
// TLSConfig is used to enable TLS on the RPC client.
TLSConfig *tls.Config
// Managed represents if the client should be managed by the // Managed represents if the client should be managed by the
// plugin package or not. If true, then by calling CleanupClients, // plugin package or not. If true, then by calling CleanupClients,
// it will automatically be cleaned up. Otherwise, the client // it will automatically be cleaned up. Otherwise, the client
@ -109,16 +141,76 @@ type ClientConfig struct {
// sync any of these streams. // sync any of these streams.
SyncStdout io.Writer SyncStdout io.Writer
SyncStderr io.Writer SyncStderr io.Writer
// AllowedProtocols is a list of allowed protocols. If this isn't set,
// then only netrpc is allowed. This is so that older go-plugin systems
// can show friendly errors if they see a plugin with an unknown
// protocol.
//
// By setting this, you can cause an error immediately on plugin start
// if an unsupported protocol is used with a good error message.
//
// If this isn't set at all (nil value), then only net/rpc is accepted.
// This is done for legacy reasons. You must explicitly opt-in to
// new protocols.
AllowedProtocols []Protocol
// Logger is the logger that the client will use. If none is provided,
// it will default to hclog's default logger.
Logger hclog.Logger
} }
// ReattachConfig is used to configure a client to reattach to an // ReattachConfig is used to configure a client to reattach to an
// already-running plugin process. You can retrieve this information by // already-running plugin process. You can retrieve this information by
// calling ReattachConfig on Client. // calling ReattachConfig on Client.
type ReattachConfig struct { type ReattachConfig struct {
Protocol Protocol
Addr net.Addr Addr net.Addr
Pid int Pid int
} }
// SecureConfig is used to configure a client to verify the integrity of an
// executable before running. It does this by verifying that the file's
// checksum matches the expected value. Hash specifies the hashing method to use when checksumming
// the file. The configuration is verified by the client by calling the
// SecureConfig.Check() function.
//
// The host process should ensure the checksum was provided by a trusted and
// authoritative source. The binary should be installed in such a way that it
// can not be modified by an unauthorized user between the time of this check
// and the time of execution.
type SecureConfig struct {
Checksum []byte
Hash hash.Hash
}
// Check takes the filepath to an executable and returns true if the checksum of
// the file matches the checksum provided in the SecureConfig.
func (s *SecureConfig) Check(filePath string) (bool, error) {
if len(s.Checksum) == 0 {
return false, ErrSecureConfigNoChecksum
}
if s.Hash == nil {
return false, ErrSecureConfigNoHash
}
file, err := os.Open(filePath)
if err != nil {
return false, err
}
defer file.Close()
_, err = io.Copy(s.Hash, file)
if err != nil {
return false, err
}
sum := s.Hash.Sum(nil)
return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil
}
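Putting the new `ClientConfig` fields together, host-side usage might look roughly like the sketch below. The handshake value, plugin map, plugin name, binary path, and checksum are all placeholders, and the `crypto/sha256`, `encoding/hex`, `log`, and `os/exec` imports are assumed:

```go
// Expected SHA-256 of the plugin binary, obtained from a trusted source
// (shown here only as a placeholder value).
expected, err := hex.DecodeString(
	"2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae")
if err != nil {
	log.Fatal(err)
}

client := plugin.NewClient(&plugin.ClientConfig{
	HandshakeConfig: handshake, // shared with the plugin binary
	Plugins:         pluginMap, // map[string]plugin.Plugin
	Cmd:             exec.Command("./my-plugin"),
	SecureConfig: &plugin.SecureConfig{
		Checksum: expected,
		Hash:     sha256.New(),
	},
	// Opt in to gRPC plugins in addition to the legacy net/rpc protocol.
	AllowedProtocols: []plugin.Protocol{
		plugin.ProtocolNetRPC, plugin.ProtocolGRPC,
	},
})
defer client.Kill()

rpcClient, err := client.Client()
if err != nil {
	log.Fatal(err)
}
raw, err := rpcClient.Dispense("kv")
if err != nil {
	log.Fatal(err)
}
_ = raw // assert raw to the plugin's interface type
```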
// This makes sure all the managed subprocesses are killed and properly // This makes sure all the managed subprocesses are killed and properly
// logged. This should be called before the parent process running the // logged. This should be called before the parent process running the
// plugins exits. // plugins exits.
@ -174,7 +266,19 @@ func NewClient(config *ClientConfig) (c *Client) {
config.SyncStderr = ioutil.Discard config.SyncStderr = ioutil.Discard
} }
c = &Client{config: config} if config.AllowedProtocols == nil {
config.AllowedProtocols = []Protocol{ProtocolNetRPC}
}
if config.Logger == nil {
config.Logger = hclog.Default()
config.Logger = config.Logger.ResetNamed("plugin")
}
c = &Client{
config: config,
logger: config.Logger,
}
if config.Managed { if config.Managed {
managedClientsLock.Lock() managedClientsLock.Lock()
managedClients = append(managedClients, c) managedClients = append(managedClients, c)
@ -184,11 +288,11 @@ func NewClient(config *ClientConfig) (c *Client) {
return return
} }
// Client returns an RPC client for the plugin. // Client returns the protocol client for this connection.
// //
// Subsequent calls to this will return the same RPC client. // Subsequent calls to this will return the same client.
func (c *Client) Client() (*RPCClient, error) { func (c *Client) Client() (ClientProtocol, error) {
addr, err := c.Start() _, err := c.Start()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -200,29 +304,18 @@ func (c *Client) Client() (*RPCClient, error) {
return c.client, nil return c.client, nil
} }
// Connect to the client switch c.protocol {
conn, err := net.Dial(addr.Network(), addr.String()) case ProtocolNetRPC:
if err != nil { c.client, err = newRPCClient(c)
return nil, err
} case ProtocolGRPC:
if tcpConn, ok := conn.(*net.TCPConn); ok { c.client, err = newGRPCClient(c)
// Make sure to set keep alive so that the connection doesn't die
tcpConn.SetKeepAlive(true) default:
return nil, fmt.Errorf("unknown server protocol: %s", c.protocol)
} }
// Create the actual RPC client
c.client, err = NewRPCClient(conn, c.config.Plugins)
if err != nil { if err != nil {
conn.Close()
return nil, err
}
// Begin the stream syncing so that stdin, out, err work properly
err = c.client.SyncStreams(
c.config.SyncStdout,
c.config.SyncStderr)
if err != nil {
c.client.Close()
c.client = nil c.client = nil
return nil, err return nil, err
} }
@ -274,8 +367,7 @@ func (c *Client) Kill() {
if err != nil { if err != nil {
// If there was an error just log it. We're going to force // If there was an error just log it. We're going to force
// kill in a moment anyways. // kill in a moment anyways.
log.Printf( c.logger.Warn("error closing client during Kill", "err", err)
"[WARN] plugin: error closing client during Kill: %s", err)
} }
} }
} }
@ -318,9 +410,14 @@ func (c *Client) Start() (addr net.Addr, err error) {
{ {
cmdSet := c.config.Cmd != nil cmdSet := c.config.Cmd != nil
attachSet := c.config.Reattach != nil attachSet := c.config.Reattach != nil
secureSet := c.config.SecureConfig != nil
if cmdSet == attachSet { if cmdSet == attachSet {
return nil, fmt.Errorf("Only one of Cmd or Reattach must be set") return nil, fmt.Errorf("Only one of Cmd or Reattach must be set")
} }
if secureSet && attachSet {
return nil, ErrSecureConfigAndReattach
}
} }
// Create the logging channel for when we kill // Create the logging channel for when we kill
@ -350,7 +447,7 @@ func (c *Client) Start() (addr net.Addr, err error) {
pidWait(pid) pidWait(pid)
// Log so we can see it // Log so we can see it
log.Printf("[DEBUG] plugin: reattached plugin process exited\n") c.logger.Debug("reattached plugin process exited")
// Mark it // Mark it
c.l.Lock() c.l.Lock()
@ -364,6 +461,11 @@ func (c *Client) Start() (addr net.Addr, err error) {
// Set the address and process // Set the address and process
c.address = c.config.Reattach.Addr c.address = c.config.Reattach.Addr
c.process = p c.process = p
c.protocol = c.config.Reattach.Protocol
if c.protocol == "" {
// Default the protocol to net/rpc for backwards compatibility
c.protocol = ProtocolNetRPC
}
return c.address, nil return c.address, nil
} }
@ -384,7 +486,15 @@ func (c *Client) Start() (addr net.Addr, err error) {
cmd.Stderr = stderr_w cmd.Stderr = stderr_w
cmd.Stdout = stdout_w cmd.Stdout = stdout_w
log.Printf("[DEBUG] plugin: starting plugin: %s %#v", cmd.Path, cmd.Args) if c.config.SecureConfig != nil {
if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil {
return nil, fmt.Errorf("error verifying checksum: %s", err)
} else if !ok {
return nil, ErrChecksumsDoNotMatch
}
}
c.logger.Debug("starting plugin", "path", cmd.Path, "args", cmd.Args)
err = cmd.Start() err = cmd.Start()
if err != nil { if err != nil {
return return
@ -418,7 +528,7 @@ func (c *Client) Start() (addr net.Addr, err error) {
cmd.Wait() cmd.Wait()
// Log and make sure to flush the logs write away // Log and make sure to flush the logs write away
log.Printf("[DEBUG] plugin: %s: plugin process exited\n", cmd.Path) c.logger.Debug("plugin process exited", "path", cmd.Path)
os.Stderr.Sync() os.Stderr.Sync()
// Mark that we exited // Mark that we exited
@ -465,7 +575,7 @@ func (c *Client) Start() (addr net.Addr, err error) {
timeout := time.After(c.config.StartTimeout) timeout := time.After(c.config.StartTimeout)
// Start looking for the address // Start looking for the address
log.Printf("[DEBUG] plugin: waiting for RPC address for: %s", cmd.Path) c.logger.Debug("waiting for RPC address", "path", cmd.Path)
select { select {
case <-timeout: case <-timeout:
err = errors.New("timeout while waiting for plugin to start") err = errors.New("timeout while waiting for plugin to start")
@ -475,7 +585,7 @@ func (c *Client) Start() (addr net.Addr, err error) {
// Trim the line and split by "|" in order to get the parts of // Trim the line and split by "|" in order to get the parts of
// the output. // the output.
line := strings.TrimSpace(string(lineBytes)) line := strings.TrimSpace(string(lineBytes))
parts := strings.SplitN(line, "|", 4) parts := strings.SplitN(line, "|", 6)
if len(parts) < 4 { if len(parts) < 4 {
err = fmt.Errorf( err = fmt.Errorf(
"Unrecognized remote plugin message: %s\n\n"+ "Unrecognized remote plugin message: %s\n\n"+
@ -525,6 +635,27 @@ func (c *Client) Start() (addr net.Addr, err error) {
default: default:
err = fmt.Errorf("Unknown address type: %s", parts[3]) err = fmt.Errorf("Unknown address type: %s", parts[3])
} }
// If we have a server type, then record that. We default to net/rpc
// for backwards compatibility.
c.protocol = ProtocolNetRPC
if len(parts) >= 5 {
c.protocol = Protocol(parts[4])
}
found := false
for _, p := range c.config.AllowedProtocols {
if p == c.protocol {
found = true
break
}
}
if !found {
err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v",
c.protocol, c.config.AllowedProtocols)
return
}
} }
c.address = addr c.address = addr
@ -555,20 +686,83 @@ func (c *Client) ReattachConfig() *ReattachConfig {
} }
return &ReattachConfig{ return &ReattachConfig{
Protocol: c.protocol,
Addr: c.address, Addr: c.address,
Pid: c.config.Cmd.Process.Pid, Pid: c.config.Cmd.Process.Pid,
} }
} }
// Protocol returns the protocol of the server on the remote end. This will
// start the plugin process if it isn't already started. Errors from
// starting the plugin are suppressed and ProtocolInvalid is returned. It
// is recommended you call Start explicitly before calling Protocol to ensure
// no errors occur.
func (c *Client) Protocol() Protocol {
_, err := c.Start()
if err != nil {
return ProtocolInvalid
}
return c.protocol
}
// dialer is compatible with grpc.WithDialer and creates the connection
// to the plugin.
func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) {
// Connect to the client
conn, err := net.Dial(c.address.Network(), c.address.String())
if err != nil {
return nil, err
}
if tcpConn, ok := conn.(*net.TCPConn); ok {
// Make sure to set keep alive so that the connection doesn't die
tcpConn.SetKeepAlive(true)
}
// If we have a TLS config we wrap our connection. We only do this
// for net/rpc since gRPC uses its own mechanism for TLS.
if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil {
conn = tls.Client(conn, c.config.TLSConfig)
}
return conn, nil
}
func (c *Client) logStderr(r io.Reader) { func (c *Client) logStderr(r io.Reader) {
bufR := bufio.NewReader(r) bufR := bufio.NewReader(r)
for { for {
line, err := bufR.ReadString('\n') line, err := bufR.ReadString('\n')
if line != "" { if line != "" {
c.config.Stderr.Write([]byte(line)) c.config.Stderr.Write([]byte(line))
line = strings.TrimRightFunc(line, unicode.IsSpace) line = strings.TrimRightFunc(line, unicode.IsSpace)
log.Printf("[DEBUG] plugin: %s: %s", filepath.Base(c.config.Cmd.Path), line)
l := c.logger.Named(filepath.Base(c.config.Cmd.Path))
// If output is not JSON format, print directly as error
if !isJSON(line) {
l.Debug("log from plugin", "entry", line)
} else {
// Parse JSON line received from the plugin into logEntry, and print via
// the client's logger
entry, err := parseJSON(line)
if err != nil {
l.Error("error parsing json from plugin", "error", err)
}
out := flattenKVPairs(entry.KVPairs)
l = l.With("timestamp", entry.Timestamp.Format(hclog.TimeFormat))
switch hclog.LevelFromString(entry.Level) {
case hclog.Trace:
l.Trace(entry.Message, out...)
case hclog.Debug:
l.Debug(entry.Message, out...)
case hclog.Info:
l.Info(entry.Message, out...)
case hclog.Warn:
l.Warn(entry.Message, out...)
case hclog.Error:
l.Error(entry.Message, out...)
}
}
} }
if err == io.EOF { if err == io.EOF {
@ -579,3 +773,8 @@ func (c *Client) logStderr(r io.Reader) {
// Flag that we've completed logging for others // Flag that we've completed logging for others
close(c.doneLogging) close(c.doneLogging)
} }
func isJSON(str string) bool {
var js json.RawMessage
return json.Unmarshal([]byte(str), &js) == nil
}

vendor/github.com/hashicorp/go-plugin/grpc_client.go generated vendored Normal file

@ -0,0 +1,83 @@
package plugin
import (
"fmt"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/health/grpc_health_v1"
)
// newGRPCClient creates a new GRPCClient. The Client argument is expected
// to be successfully started already with a lock held.
func newGRPCClient(c *Client) (*GRPCClient, error) {
// Build dialing options.
opts := make([]grpc.DialOption, 0, 5)
// We use a custom dialer so that we can connect over unix domain sockets
opts = append(opts, grpc.WithDialer(c.dialer))
// go-plugin expects to block the connection
opts = append(opts, grpc.WithBlock())
// Fail right away
opts = append(opts, grpc.FailOnNonTempDialError(true))
// If we have no TLS configuration set, we need to explicitly tell grpc
// that we're connecting with an insecure connection.
if c.config.TLSConfig == nil {
opts = append(opts, grpc.WithInsecure())
} else {
opts = append(opts, grpc.WithTransportCredentials(
credentials.NewTLS(c.config.TLSConfig)))
}
// Connect. Note the first parameter is unused because we use a custom
// dialer that has the state to see the address.
conn, err := grpc.Dial("unused", opts...)
if err != nil {
return nil, err
}
return &GRPCClient{
Conn: conn,
Plugins: c.config.Plugins,
}, nil
}
// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types.
type GRPCClient struct {
Conn *grpc.ClientConn
Plugins map[string]Plugin
}
// ClientProtocol impl.
func (c *GRPCClient) Close() error {
return c.Conn.Close()
}
// ClientProtocol impl.
func (c *GRPCClient) Dispense(name string) (interface{}, error) {
raw, ok := c.Plugins[name]
if !ok {
return nil, fmt.Errorf("unknown plugin type: %s", name)
}
p, ok := raw.(GRPCPlugin)
if !ok {
return nil, fmt.Errorf("plugin %q doesn't support gRPC", name)
}
return p.GRPCClient(c.Conn)
}
// ClientProtocol impl.
func (c *GRPCClient) Ping() error {
client := grpc_health_v1.NewHealthClient(c.Conn)
_, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{
Service: GRPCServiceName,
})
return err
}

vendor/github.com/hashicorp/go-plugin/grpc_server.go generated vendored Normal file

@ -0,0 +1,115 @@
package plugin
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/health"
"google.golang.org/grpc/health/grpc_health_v1"
)
// GRPCServiceName is the name of the service that the health check should
// return as passing.
const GRPCServiceName = "plugin"
// DefaultGRPCServer can be used with the "GRPCServer" field for Server
// as a default factory method to create a gRPC server with no extra options.
func DefaultGRPCServer(opts []grpc.ServerOption) *grpc.Server {
return grpc.NewServer(opts...)
}
// GRPCServer is a ServerType implementation that serves plugins over
// gRPC. This allows plugins to easily be written for other languages.
//
// The GRPCServer outputs a custom configuration as a base64-encoded
// JSON structure represented by the GRPCServerConfig config structure.
type GRPCServer struct {
// Plugins are the list of plugins to serve.
Plugins map[string]Plugin
// Server is the actual server that will accept connections. This
// will be used for plugin registration as well.
Server func([]grpc.ServerOption) *grpc.Server
// TLS should be the TLS configuration if available. If this is nil,
// the connection will not have transport security.
TLS *tls.Config
// DoneCh is the channel that is closed when this server has exited.
DoneCh chan struct{}
// Stdout/StderrLis are the readers for stdout/stderr that will be copied
// to the stdout/stderr connection that is output.
Stdout io.Reader
Stderr io.Reader
config GRPCServerConfig
server *grpc.Server
}
// ServerProtocol impl.
func (s *GRPCServer) Init() error {
// Create our server
var opts []grpc.ServerOption
if s.TLS != nil {
opts = append(opts, grpc.Creds(credentials.NewTLS(s.TLS)))
}
s.server = s.Server(opts)
// Register the health service
healthCheck := health.NewServer()
healthCheck.SetServingStatus(
GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING)
grpc_health_v1.RegisterHealthServer(s.server, healthCheck)
// Register all our plugins onto the gRPC server.
for k, raw := range s.Plugins {
p, ok := raw.(GRPCPlugin)
if !ok {
return fmt.Errorf("%q is not a GRPC-compatibile plugin", k)
}
if err := p.GRPCServer(s.server); err != nil {
return fmt.Errorf("error registring %q: %s", k, err)
}
}
return nil
}
// Config is the GRPCServerConfig encoded as JSON then base64.
func (s *GRPCServer) Config() string {
// Create a buffer that will contain our final contents
var buf bytes.Buffer
// Wrap the base64 encoding with JSON encoding.
if err := json.NewEncoder(&buf).Encode(s.config); err != nil {
// We panic since this shouldn't happen under any scenario. We
// carefully control the structure being encoded here and it should
// always be successful.
panic(err)
}
return buf.String()
}
func (s *GRPCServer) Serve(lis net.Listener) {
// Start serving in a goroutine
go s.server.Serve(lis)
// Wait until graceful completion
<-s.DoneCh
}
// GRPCServerConfig is the extra configuration passed along for consumers
// to facilitate using GRPC plugins.
type GRPCServerConfig struct {
StdoutAddr string `json:"stdout_addr"`
StderrAddr string `json:"stderr_addr"`
}

vendor/github.com/hashicorp/go-plugin/log_entry.go generated vendored Normal file

@ -0,0 +1,73 @@
package plugin
import (
"encoding/json"
"time"
)
// logEntry is the JSON payload that gets sent to Stderr from the plugin to the host
type logEntry struct {
Message string `json:"@message"`
Level string `json:"@level"`
Timestamp time.Time `json:"timestamp"`
KVPairs []*logEntryKV `json:"kv_pairs"`
}
// logEntryKV is a key value pair within the Output payload
type logEntryKV struct {
Key string `json:"key"`
Value interface{} `json:"value"`
}
// flattenKVPairs is used to flatten KVPair slice into []interface{}
// for hclog consumption.
func flattenKVPairs(kvs []*logEntryKV) []interface{} {
var result []interface{}
for _, kv := range kvs {
result = append(result, kv.Key)
result = append(result, kv.Value)
}
return result
}
// parseJSON handles parsing JSON output
func parseJSON(input string) (*logEntry, error) {
var raw map[string]interface{}
entry := &logEntry{}
err := json.Unmarshal([]byte(input), &raw)
if err != nil {
return nil, err
}
// Parse hclog-specific objects
if v, ok := raw["@message"]; ok {
entry.Message = v.(string)
delete(raw, "@message")
}
if v, ok := raw["@level"]; ok {
entry.Level = v.(string)
delete(raw, "@level")
}
if v, ok := raw["@timestamp"]; ok {
t, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", v.(string))
if err != nil {
return nil, err
}
entry.Timestamp = t
delete(raw, "@timestamp")
}
// Parse dynamic KV args from the hclog payload.
for k, v := range raw {
entry.KVPairs = append(entry.KVPairs, &logEntryKV{
Key: k,
Value: v,
})
}
return entry, nil
}


@ -9,7 +9,10 @@
package plugin package plugin
import ( import (
"errors"
"net/rpc" "net/rpc"
"google.golang.org/grpc"
) )
// Plugin is the interface that is implemented to serve/connect to an // Plugin is the interface that is implemented to serve/connect to an
@ -23,3 +26,31 @@ type Plugin interface {
// serving that communicates to the server end of the plugin. // serving that communicates to the server end of the plugin.
Client(*MuxBroker, *rpc.Client) (interface{}, error) Client(*MuxBroker, *rpc.Client) (interface{}, error)
} }
// GRPCPlugin is the interface that is implemented to serve/connect to
// a plugin over gRPC.
type GRPCPlugin interface {
// GRPCServer should register this plugin for serving with the
// given GRPCServer. Unlike Plugin.Server, this is only called once
// since gRPC plugins serve singletons.
GRPCServer(*grpc.Server) error
// GRPCClient should return the interface implementation for the plugin
// you're serving via gRPC.
GRPCClient(*grpc.ClientConn) (interface{}, error)
}
// NetRPCUnsupportedPlugin implements Plugin but returns errors for the
// Server and Client functions. This will effectively disable support for
// net/rpc based plugins.
//
// This struct can be embedded in your struct.
type NetRPCUnsupportedPlugin struct{}
func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) {
return nil, errors.New("net/rpc plugin protocol not supported")
}
func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) {
return nil, errors.New("net/rpc plugin protocol not supported")
}
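For illustration, a gRPC-only plugin type could embed `NetRPCUnsupportedPlugin` so the net/rpc methods automatically return errors, and implement `GRPCPlugin` on top of a generated stub. Everything named `KV*` and the `proto` package below are hypothetical and stand in for your own service definitions:

```go
// KVGRPCPlugin serves a hypothetical key/value service over gRPC only.
type KVGRPCPlugin struct {
	// Embedding this disables the net/rpc protocol for the plugin.
	plugin.NetRPCUnsupportedPlugin

	// Impl is the concrete implementation used on the plugin side.
	Impl KV
}

func (p *KVGRPCPlugin) GRPCServer(s *grpc.Server) error {
	// Register the generated gRPC service backed by the real implementation.
	proto.RegisterKVServer(s, &KVGRPCServer{Impl: p.Impl})
	return nil
}

func (p *KVGRPCPlugin) GRPCClient(c *grpc.ClientConn) (interface{}, error) {
	// Return a client-side wrapper that satisfies the KV interface.
	return &KVGRPCClient{client: proto.NewKVClient(c)}, nil
}
```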

vendor/github.com/hashicorp/go-plugin/protocol.go generated vendored Normal file

@ -0,0 +1,45 @@
package plugin
import (
"io"
"net"
)
// Protocol is an enum representing the types of protocols.
type Protocol string
const (
ProtocolInvalid Protocol = ""
ProtocolNetRPC Protocol = "netrpc"
ProtocolGRPC Protocol = "grpc"
)
// ServerProtocol is an interface that must be implemented for new plugin
// protocols to be servers.
type ServerProtocol interface {
// Init is called once to configure and initialize the protocol, but
// not start listening. This is the point at which all validation should
// be done and errors returned.
Init() error
// Config is extra configuration to be outputted to stdout. This will
// be automatically base64 encoded to ensure it can be parsed properly.
// This can be an empty string if additional configuration is not needed.
Config() string
// Serve is called to serve connections on the given listener. This should
// continue until the listener is closed.
Serve(net.Listener)
}
// ClientProtocol is an interface that must be implemented for new plugin
// protocols to be clients.
type ClientProtocol interface {
io.Closer
// Dispense dispenses a new instance of the plugin with the given name.
Dispense(string) (interface{}, error)
// Ping checks that the client connection is still healthy.
Ping() error
}


@ -1,6 +1,7 @@
package plugin package plugin
import ( import (
"crypto/tls"
"fmt" "fmt"
"io" "io"
"net" "net"
@ -19,6 +20,42 @@ type RPCClient struct {
stdout, stderr net.Conn stdout, stderr net.Conn
} }
// newRPCClient creates a new RPCClient. The Client argument is expected
// to be successfully started already with a lock held.
func newRPCClient(c *Client) (*RPCClient, error) {
// Connect to the client
conn, err := net.Dial(c.address.Network(), c.address.String())
if err != nil {
return nil, err
}
if tcpConn, ok := conn.(*net.TCPConn); ok {
// Make sure to set keep alive so that the connection doesn't die
tcpConn.SetKeepAlive(true)
}
if c.config.TLSConfig != nil {
conn = tls.Client(conn, c.config.TLSConfig)
}
// Create the actual RPC client
result, err := NewRPCClient(conn, c.config.Plugins)
if err != nil {
conn.Close()
return nil, err
}
// Begin the stream syncing so that stdin, out, err work properly
err = result.SyncStreams(
c.config.SyncStdout,
c.config.SyncStderr)
if err != nil {
result.Close()
return nil, err
}
return result, nil
}
// NewRPCClient creates a client from an already-open connection-like value. // NewRPCClient creates a client from an already-open connection-like value.
// Dial is typically used instead. // Dial is typically used instead.
func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) { func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) {
@ -121,3 +158,13 @@ func (c *RPCClient) Dispense(name string) (interface{}, error) {
return p.Client(c.broker, rpc.NewClient(conn)) return p.Client(c.broker, rpc.NewClient(conn))
} }
// Ping pings the connection to ensure it is still alive.
//
// The error from the RPC call is returned directly so that it can be
// inspected further. Any error returned from here indicates that the
// connection to the plugin is not healthy.
func (c *RPCClient) Ping() error {
var empty struct{}
return c.control.Call("Control.Ping", true, &empty)
}
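Since any error from `Ping` means the connection is unhealthy, a host could poll it periodically and restart the plugin when it fails. A rough sketch, where `client`, `rpcClient`, and `logger` are the values from an earlier `NewClient`/`Client()` call and the interval and restart strategy are placeholders:

```go
go func() {
	for range time.Tick(30 * time.Second) {
		if err := rpcClient.Ping(); err != nil {
			logger.Warn("plugin connection unhealthy, restarting", "error", err)
			client.Kill()
			// ...relaunch via a fresh plugin.NewClient and re-Dispense here.
			return
		}
	}
}()
```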


@ -34,10 +34,14 @@ type RPCServer struct {
lock sync.Mutex lock sync.Mutex
} }
// Accept accepts connections on a listener and serves requests for // ServerProtocol impl.
// each incoming connection. Accept blocks; the caller typically invokes func (s *RPCServer) Init() error { return nil }
// it in a go statement.
func (s *RPCServer) Accept(lis net.Listener) { // ServerProtocol impl.
func (s *RPCServer) Config() string { return "" }
// ServerProtocol impl.
func (s *RPCServer) Serve(lis net.Listener) {
for { for {
conn, err := lis.Accept() conn, err := lis.Accept()
if err != nil { if err != nil {
@ -122,6 +126,14 @@ type controlServer struct {
server *RPCServer server *RPCServer
} }
// Ping can be called to verify the connection (and likely the binary)
// is still alive to a plugin.
func (c *controlServer) Ping(
null bool, response *struct{}) error {
*response = struct{}{}
return nil
}
func (c *controlServer) Quit( func (c *controlServer) Quit(
null bool, response *struct{}) error { null bool, response *struct{}) error {
// End the server // End the server


@ -1,6 +1,8 @@
package plugin package plugin
import ( import (
"crypto/tls"
"encoding/base64"
"errors" "errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
@ -11,6 +13,10 @@ import (
"runtime" "runtime"
"strconv" "strconv"
"sync/atomic" "sync/atomic"
"github.com/hashicorp/go-hclog"
"google.golang.org/grpc"
) )
// CoreProtocolVersion is the ProtocolVersion of the plugin system itself. // CoreProtocolVersion is the ProtocolVersion of the plugin system itself.
@ -45,14 +51,37 @@ type ServeConfig struct {
// HandshakeConfig is the configuration that must match clients. // HandshakeConfig is the configuration that must match clients.
HandshakeConfig HandshakeConfig
// TLSProvider is a function that returns a configured tls.Config.
TLSProvider func() (*tls.Config, error)
// Plugins are the plugins that are served. // Plugins are the plugins that are served.
Plugins map[string]Plugin Plugins map[string]Plugin
// GRPCServer should be non-nil to enable serving the plugins over
// gRPC. This is a function to create the server when needed with the
// given server options. The server options populated by go-plugin will
// be for TLS if set. You may modify the input slice.
//
// Note that the grpc.Server will automatically be registered with
// the gRPC health checking service. This is not optional since go-plugin
// relies on this to implement Ping().
GRPCServer func([]grpc.ServerOption) *grpc.Server
}
// Protocol returns the protocol that this server should speak.
func (c *ServeConfig) Protocol() Protocol {
result := ProtocolNetRPC
if c.GRPCServer != nil {
result = ProtocolGRPC
}
return result
} }
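On the plugin side, opting in to gRPC is just a matter of populating `GRPCServer`; `DefaultGRPCServer` suffices when no extra server options are needed. A sketch, with the handshake value and the `KVGRPCPlugin`/`localKV` types being placeholders:

```go
func main() {
	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: handshake, // must match the host's handshake
		Plugins: map[string]plugin.Plugin{
			"kv": &KVGRPCPlugin{Impl: &localKV{}},
		},
		// Enables the gRPC protocol: Protocol() then reports ProtocolGRPC
		// and the handshake line advertises it to the host.
		GRPCServer: plugin.DefaultGRPCServer,
	})
}
```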
// Serve serves the plugins given by ServeConfig. // Serve serves the plugins given by ServeConfig.
// //
// Serve doesn't return until the plugin is done being executed. Any // Serve doesn't return until the plugin is done being executed. Any
// errors will be outputted to the log. // errors will be outputted to os.Stderr.
// //
// This is the method that plugins should call in their main() functions. // This is the method that plugins should call in their main() functions.
func Serve(opts *ServeConfig) { func Serve(opts *ServeConfig) {
@ -77,6 +106,13 @@ func Serve(opts *ServeConfig) {
// Logging goes to the original stderr // Logging goes to the original stderr
log.SetOutput(os.Stderr) log.SetOutput(os.Stderr)
// internal logger to os.Stderr
logger := hclog.New(&hclog.LoggerOptions{
Level: hclog.Trace,
Output: os.Stderr,
JSONFormat: true,
})
// Create our new stdout, stderr files. These will override our built-in // Create our new stdout, stderr files. These will override our built-in
// stdout/stderr so that it works across the stream boundary. // stdout/stderr so that it works across the stream boundary.
stdout_r, stdout_w, err := os.Pipe() stdout_r, stdout_w, err := os.Pipe()
@ -93,30 +129,86 @@ func Serve(opts *ServeConfig) {
// Register a listener so we can accept a connection // Register a listener so we can accept a connection
listener, err := serverListener() listener, err := serverListener()
if err != nil { if err != nil {
log.Printf("[ERR] plugin: plugin init: %s", err) logger.Error("plugin init error", "error", err)
return return
} }
defer listener.Close()
// Close the listener on return. We wrap this in a func() on purpose
// because the "listener" reference may change to TLS.
defer func() {
listener.Close()
}()
var tlsConfig *tls.Config
if opts.TLSProvider != nil {
tlsConfig, err = opts.TLSProvider()
if err != nil {
logger.Error("plugin tls init", "error", err)
return
}
}
// Create the channel to tell us when we're done // Create the channel to tell us when we're done
doneCh := make(chan struct{}) doneCh := make(chan struct{})
// Build the server type
var server ServerProtocol
switch opts.Protocol() {
case ProtocolNetRPC:
// If we have a TLS configuration then we wrap the listener
// ourselves and do it at that level.
if tlsConfig != nil {
listener = tls.NewListener(listener, tlsConfig)
}
// Create the RPC server to dispense
server = &RPCServer{
Plugins: opts.Plugins,
Stdout: stdout_r,
Stderr: stderr_r,
DoneCh: doneCh,
}
case ProtocolGRPC:
// Create the gRPC server
server = &GRPCServer{
Plugins: opts.Plugins,
Server: opts.GRPCServer,
TLS: tlsConfig,
Stdout: stdout_r,
Stderr: stderr_r,
DoneCh: doneCh,
}
default:
panic("unknown server protocol: " + opts.Protocol())
}
// Initialize the servers
if err := server.Init(); err != nil {
logger.Error("protocol init", "error", err)
return
}
// Build the extra configuration
extra := ""
if v := server.Config(); v != "" {
extra = base64.StdEncoding.EncodeToString([]byte(v))
}
if extra != "" {
extra = "|" + extra
}
logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String())
// Output the address and service name to stdout so that core can bring it up.
fmt.Printf("%d|%d|%s|%s|%s%s\n",
CoreProtocolVersion,
opts.ProtocolVersion,
listener.Addr().Network(),
listener.Addr().String(),
opts.Protocol(),
extra)
os.Stdout.Sync()
// Eat the interrupts // Eat the interrupts
@ -127,9 +219,7 @@ func Serve(opts *ServeConfig) {
for {
<-ch
newCount := atomic.AddInt32(&count, 1)
logger.Debug("plugin received interrupt signal, ignoring", "count", newCount)
}
}()
@ -137,10 +227,8 @@ func Serve(opts *ServeConfig) {
os.Stdout = stdout_w
os.Stderr = stderr_w
// Accept connections and wait for completion
go server.Serve(listener)
// Wait for the graceful exit
<-doneCh
}
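For context, a minimal sketch of a plugin main() against this version of the package; the handshake values and the nopPlugin type are illustrative only, not anything defined by this commit, and a real plugin would register something useful. Setting GRPCServer (for example to plugin.DefaultGRPCServer) is what makes Protocol() report ProtocolGRPC.

package main

import (
    "net/rpc"

    plugin "github.com/hashicorp/go-plugin"
)

// Illustrative handshake; it must match whatever the host process expects.
var handshake = plugin.HandshakeConfig{
    ProtocolVersion:  1,
    MagicCookieKey:   "EXAMPLE_PLUGIN",
    MagicCookieValue: "example",
}

// nopPlugin is a stand-in net/rpc plugin implementation.
type nopPlugin struct{}

func (nopPlugin) Server(*plugin.MuxBroker) (interface{}, error) { return struct{}{}, nil }

func (nopPlugin) Client(*plugin.MuxBroker, *rpc.Client) (interface{}, error) {
    return struct{}{}, nil
}

func main() {
    plugin.Serve(&plugin.ServeConfig{
        HandshakeConfig: handshake,
        Plugins:         map[string]plugin.Plugin{"nop": nopPlugin{}},
        // Leaving GRPCServer nil keeps the legacy net/rpc protocol;
        // setting it switches the handshake line to ProtocolGRPC.
        // GRPCServer: plugin.DefaultGRPCServer,
    })
}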

View File

@ -4,7 +4,9 @@ import (
"bytes" "bytes"
"net" "net"
"net/rpc" "net/rpc"
"testing"
"github.com/mitchellh/go-testing-interface"
"google.golang.org/grpc"
) )
// The testing file contains test helpers that you can use outside of // The testing file contains test helpers that you can use outside of
@ -12,7 +14,7 @@ import (
// TestConn is a helper function for returning a client and server
// net.Conn connected to each other.
func TestConn(t testing.T) (net.Conn, net.Conn) {
// Listen to any local port. This listener will be closed
// after a single connection is established.
l, err := net.Listen("tcp", "127.0.0.1:0")
@ -46,7 +48,7 @@ func TestConn(t *testing.T) (net.Conn, net.Conn) {
}
// TestRPCConn returns a rpc client and server connected to each other.
func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) {
clientConn, serverConn := TestConn(t)
server := rpc.NewServer()
@ -58,7 +60,7 @@ func TestRPCConn(t *testing.T) (*rpc.Client, *rpc.Server) {
// TestPluginRPCConn returns a plugin RPC client and server that are connected
// together and configured.
func TestPluginRPCConn(t testing.T, ps map[string]Plugin) (*RPCClient, *RPCServer) {
// Create two net.Conns we can use to shuttle our control connection
clientConn, serverConn := TestConn(t)
@ -74,3 +76,45 @@ func TestPluginRPCConn(t *testing.T, ps map[string]Plugin) (*RPCClient, *RPCServ
return client, server
}
// TestPluginGRPCConn returns a plugin gRPC client and server that are connected
// together and configured. This is used to test gRPC connections.
func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) {
// Create a listener
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatalf("err: %s", err)
}
// Start up the server
server := &GRPCServer{
Plugins: ps,
Server: DefaultGRPCServer,
Stdout: new(bytes.Buffer),
Stderr: new(bytes.Buffer),
}
if err := server.Init(); err != nil {
t.Fatalf("err: %s", err)
}
go server.Serve(l)
// Connect to the server
conn, err := grpc.Dial(
l.Addr().String(),
grpc.WithBlock(),
grpc.WithInsecure())
if err != nil {
t.Fatalf("err: %s", err)
}
// Connection successful, close the listener
l.Close()
// Create the client
client := &GRPCClient{
Conn: conn,
Plugins: ps,
}
return client, server
}
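The helpers above now take the go-testing-interface testing.T interface instead of the standard library *testing.T, so they can also be driven outside of `go test`. A small in-package sketch of what that enables; the devModeRPCConn wrapper is hypothetical and not part of this commit:

package plugin

import (
    testing "github.com/mitchellh/go-testing-interface"
)

// devModeRPCConn builds a connected client/server pair at runtime.
// RuntimeT satisfies the testing.T interface and panics on Fatal
// instead of stopping a test run.
func devModeRPCConn() (*RPCClient, *RPCServer) {
    return TestPluginRPCConn(&testing.RuntimeT{}, map[string]Plugin{})
}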

View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 Mitchell Hashimoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -0,0 +1,52 @@
# go-testing-interface
go-testing-interface is a Go library that exports an interface that
`*testing.T` implements as well as a runtime version you can use in its
place.
The purpose of this library is so that you can export test helpers as a
public API without depending on the "testing" package, since you can't
create a `*testing.T` struct manually. This lets you, for example, use the
public testing APIs to generate mock data at runtime, rather than just at
test time.
## Usage & Example
For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/go-testing-interface).
Given a test helper written using `go-testing-interface` like this:
import "github.com/mitchellh/go-testing-interface"
func TestHelper(t testing.T) {
t.Fatal("I failed")
}
You can call the test helper in a real test easily:
import "testing"
func TestThing(t *testing.T) {
TestHelper(t)
}
You can also call the test helper at runtime if needed:
import "github.com/mitchellh/go-testing-interface"
func main() {
TestHelper(&testing.RuntimeT{})
}
## Why?!
**Why would I call a test helper that takes a *testing.T at runtime?**
You probably shouldn't. The only use case I've seen (and I've had) for this
is to implement a "dev mode" for a service where the test helpers are used
to populate mock data, create a mock DB, perhaps run service dependencies
in-memory, etc.
Outside of a "dev mode", I've never seen a use case for this and I think
there shouldn't be one since the point of the `testing.T` interface is that
you can fail immediately.

View File

@ -0,0 +1,84 @@
// +build !go1.9
package testing
import (
"fmt"
"log"
)
// T is the interface that mimics the standard library *testing.T.
//
// In unit tests you can just pass a *testing.T struct. At runtime, outside
// of tests, you can pass in a RuntimeT struct from this package.
type T interface {
Error(args ...interface{})
Errorf(format string, args ...interface{})
Fail()
FailNow()
Failed() bool
Fatal(args ...interface{})
Fatalf(format string, args ...interface{})
Log(args ...interface{})
Logf(format string, args ...interface{})
Name() string
Skip(args ...interface{})
SkipNow()
Skipf(format string, args ...interface{})
Skipped() bool
}
// RuntimeT implements T and can be instantiated and run at runtime to
// mimic *testing.T behavior. Unlike *testing.T, this will simply panic
// for calls to Fatal. For calls to Error, you'll have to check the errors
// list to determine whether to exit yourself. Name and Skip methods are
// unimplemented noops.
type RuntimeT struct {
failed bool
}
func (t *RuntimeT) Error(args ...interface{}) {
log.Println(fmt.Sprintln(args...))
t.Fail()
}
func (t *RuntimeT) Errorf(format string, args ...interface{}) {
log.Println(fmt.Sprintf(format, args...))
t.Fail()
}
func (t *RuntimeT) Fatal(args ...interface{}) {
log.Println(fmt.Sprintln(args...))
t.FailNow()
}
func (t *RuntimeT) Fatalf(format string, args ...interface{}) {
log.Println(fmt.Sprintf(format, args...))
t.FailNow()
}
func (t *RuntimeT) Fail() {
t.failed = true
}
func (t *RuntimeT) FailNow() {
panic("testing.T failed, see logs for output (if any)")
}
func (t *RuntimeT) Failed() bool {
return t.failed
}
func (t *RuntimeT) Log(args ...interface{}) {
log.Println(fmt.Sprintln(args...))
}
func (t *RuntimeT) Logf(format string, args ...interface{}) {
log.Println(fmt.Sprintf(format, args...))
}
func (t *RuntimeT) Name() string { return "" }
func (t *RuntimeT) Skip(args ...interface{}) {}
func (t *RuntimeT) SkipNow() {}
func (t *RuntimeT) Skipf(format string, args ...interface{}) {}
func (t *RuntimeT) Skipped() bool { return false }
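A short sketch of the runtime behaviour described above, using only this package: Error/Errorf just mark the RuntimeT as failed, while Fatal/Fatalf panic because there is no test runner to stop.

package main

import (
    "fmt"

    testing "github.com/mitchellh/go-testing-interface"
)

// checkPositive is a helper written against the interface type, so it can
// be used both in tests and at runtime.
func checkPositive(t testing.T, n int) {
    if n <= 0 {
        t.Errorf("expected a positive number, got %d", n)
    }
}

func main() {
    rt := &testing.RuntimeT{}
    checkPositive(rt, -1)
    // Error only marks failure; the caller checks Failed() and decides what to do.
    fmt.Println("failed:", rt.Failed())
    // Fatal/Fatalf would panic instead.
}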

View File

@ -0,0 +1,80 @@
// +build go1.9
// NOTE: This is a temporary copy of testing.go for Go 1.9 with the addition
// of "Helper" to the T interface. Go 1.9 at the time of typing is in RC
// and is set for release shortly. We'll support this on master as the default
// as soon as 1.9 is released.
package testing
import (
"fmt"
"log"
)
// T is the interface that mimics the standard library *testing.T.
//
// In unit tests you can just pass a *testing.T struct. At runtime, outside
// of tests, you can pass in a RuntimeT struct from this package.
type T interface {
Error(args ...interface{})
Errorf(format string, args ...interface{})
Fatal(args ...interface{})
Fatalf(format string, args ...interface{})
Fail()
FailNow()
Failed() bool
Helper()
Log(args ...interface{})
Logf(format string, args ...interface{})
}
// RuntimeT implements T and can be instantiated and run at runtime to
// mimic *testing.T behavior. Unlike *testing.T, this will simply panic
// for calls to Fatal. For calls to Error, you'll have to check the errors
// list to determine whether to exit yourself.
type RuntimeT struct {
failed bool
}
func (t *RuntimeT) Error(args ...interface{}) {
log.Println(fmt.Sprintln(args...))
t.Fail()
}
func (t *RuntimeT) Errorf(format string, args ...interface{}) {
log.Println(fmt.Sprintf(format, args...))
t.Fail()
}
func (t *RuntimeT) Fatal(args ...interface{}) {
log.Println(fmt.Sprintln(args...))
t.FailNow()
}
func (t *RuntimeT) Fatalf(format string, args ...interface{}) {
log.Println(fmt.Sprintf(format, args...))
t.FailNow()
}
func (t *RuntimeT) Fail() {
t.failed = true
}
func (t *RuntimeT) FailNow() {
panic("testing.T failed, see logs for output (if any)")
}
func (t *RuntimeT) Failed() bool {
return t.failed
}
func (t *RuntimeT) Helper() {}
func (t *RuntimeT) Log(args ...interface{}) {
log.Println(fmt.Sprintln(args...))
}
func (t *RuntimeT) Logf(format string, args ...interface{}) {
log.Println(fmt.Sprintf(format, args...))
}

641
vendor/golang.org/x/net/http2/ciphers.go generated vendored Normal file
View File

@ -0,0 +1,641 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
// A list of the possible cipher suite ids. Taken from
// http://www.iana.org/assignments/tls-parameters/tls-parameters.txt
const (
cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
// Reserved uint16 = 0x001C-1D
cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B
cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C
cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D
cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E
cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034
cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A
cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046
// Reserved uint16 = 0x0047-4F
// Reserved uint16 = 0x0050-58
// Reserved uint16 = 0x0059-5C
// Unassigned uint16 = 0x005D-5F
// Reserved uint16 = 0x0060-66
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D
// Unassigned uint16 = 0x006E-83
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089
cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B
cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C
cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095
cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D
cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E
cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1
cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2
cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9
cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA
cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF
cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0
cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3
cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4
cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7
cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8
cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5
// Unassigned uint16 = 0x00C6-FE
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF
// Unassigned uint16 = 0x01-55,*
cipher_TLS_FALLBACK_SCSV uint16 = 0x5600
// Unassigned uint16 = 0x5601 - 0xC000
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A
cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014
cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E
cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F
cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051
cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052
cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055
cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056
cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B
cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C
cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B
cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C
cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D
cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E
cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F
cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0
cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1
cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2
cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3
cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4
cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5
cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6
cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7
cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8
cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9
cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA
cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF
// Unassigned uint16 = 0xC0B0-FF
// Unassigned uint16 = 0xC1-CB,*
// Unassigned uint16 = 0xCC00-A7
cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8
cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9
cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA
cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB
cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC
cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD
cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE
)
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
// References:
// https://tools.ietf.org/html/rfc7540#appendix-A
// Reject cipher suites from Appendix A.
// "This list includes those cipher suites that do not
// offer an ephemeral key exchange and those that are
// based on the TLS null, stream or block cipher type"
func isBadCipher(cipher uint16) bool {
switch cipher {
case cipher_TLS_NULL_WITH_NULL_NULL,
cipher_TLS_RSA_WITH_NULL_MD5,
cipher_TLS_RSA_WITH_NULL_SHA,
cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
cipher_TLS_RSA_WITH_RC4_128_MD5,
cipher_TLS_RSA_WITH_RC4_128_SHA,
cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_RSA_WITH_DES_CBC_SHA,
cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
cipher_TLS_DH_anon_WITH_RC4_128_MD5,
cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_KRB5_WITH_DES_CBC_SHA,
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_KRB5_WITH_RC4_128_SHA,
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
cipher_TLS_KRB5_WITH_DES_CBC_MD5,
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
cipher_TLS_KRB5_WITH_RC4_128_MD5,
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
cipher_TLS_PSK_WITH_NULL_SHA,
cipher_TLS_DHE_PSK_WITH_NULL_SHA,
cipher_TLS_RSA_PSK_WITH_NULL_SHA,
cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
cipher_TLS_RSA_WITH_NULL_SHA256,
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_PSK_WITH_RC4_128_SHA,
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_RSA_WITH_SEED_CBC_SHA,
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_PSK_WITH_NULL_SHA256,
cipher_TLS_PSK_WITH_NULL_SHA384,
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDH_anon_WITH_NULL_SHA,
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_RSA_WITH_AES_128_CCM,
cipher_TLS_RSA_WITH_AES_256_CCM,
cipher_TLS_RSA_WITH_AES_128_CCM_8,
cipher_TLS_RSA_WITH_AES_256_CCM_8,
cipher_TLS_PSK_WITH_AES_128_CCM,
cipher_TLS_PSK_WITH_AES_256_CCM,
cipher_TLS_PSK_WITH_AES_128_CCM_8,
cipher_TLS_PSK_WITH_AES_256_CCM_8:
return true
default:
return false
}
}
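Since isBadCipher is unexported it is only usable inside the http2 package; a sketch of the kind of filtering it enables (the helper name is made up, not part of the vendored code):

// filterHTTP2Ciphers keeps only suites that the HTTP/2 spec does not
// blacklist, e.g. when assembling a server's preferred cipher list.
func filterHTTP2Ciphers(candidates []uint16) []uint16 {
    var ok []uint16
    for _, c := range candidates {
        if !isBadCipher(c) {
            ok = append(ok, c)
        }
    }
    return ok
}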

View File

@ -56,7 +56,7 @@ func configureTransport(t1 *http.Transport) (*Transport, error) {
}
// registerHTTPSProtocol calls Transport.RegisterProtocol but
// converting panics into errors.
func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
defer func() {
if e := recover(); e != nil {

146
vendor/golang.org/x/net/http2/databuffer.go generated vendored Normal file
View File

@ -0,0 +1,146 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"errors"
"fmt"
"sync"
)
// Buffer chunks are allocated from a pool to reduce pressure on GC.
// The maximum wasted space per dataBuffer is 2x the largest size class,
// which happens when the dataBuffer has multiple chunks and there is
// one unread byte in both the first and last chunks. We use a few size
// classes to minimize overheads for servers that typically receive very
// small request bodies.
//
// TODO: Benchmark to determine if the pools are necessary. The GC may have
// improved enough that we can instead allocate chunks like this:
// make([]byte, max(16<<10, expectedBytesRemaining))
var (
dataChunkSizeClasses = []int{
1 << 10,
2 << 10,
4 << 10,
8 << 10,
16 << 10,
}
dataChunkPools = [...]sync.Pool{
{New: func() interface{} { return make([]byte, 1<<10) }},
{New: func() interface{} { return make([]byte, 2<<10) }},
{New: func() interface{} { return make([]byte, 4<<10) }},
{New: func() interface{} { return make([]byte, 8<<10) }},
{New: func() interface{} { return make([]byte, 16<<10) }},
}
)
func getDataBufferChunk(size int64) []byte {
i := 0
for ; i < len(dataChunkSizeClasses)-1; i++ {
if size <= int64(dataChunkSizeClasses[i]) {
break
}
}
return dataChunkPools[i].Get().([]byte)
}
func putDataBufferChunk(p []byte) {
for i, n := range dataChunkSizeClasses {
if len(p) == n {
dataChunkPools[i].Put(p)
return
}
}
panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
}
// dataBuffer is an io.ReadWriter backed by a list of data chunks.
// Each dataBuffer is used to read DATA frames on a single stream.
// The buffer is divided into chunks so the server can limit the
// total memory used by a single connection without limiting the
// request body size on any single stream.
type dataBuffer struct {
chunks [][]byte
r int // next byte to read is chunks[0][r]
w int // next byte to write is chunks[len(chunks)-1][w]
size int // total buffered bytes
expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
}
var errReadEmpty = errors.New("read from empty dataBuffer")
// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *dataBuffer) Read(p []byte) (int, error) {
if b.size == 0 {
return 0, errReadEmpty
}
var ntotal int
for len(p) > 0 && b.size > 0 {
readFrom := b.bytesFromFirstChunk()
n := copy(p, readFrom)
p = p[n:]
ntotal += n
b.r += n
b.size -= n
// If the first chunk has been consumed, advance to the next chunk.
if b.r == len(b.chunks[0]) {
putDataBufferChunk(b.chunks[0])
end := len(b.chunks) - 1
copy(b.chunks[:end], b.chunks[1:])
b.chunks[end] = nil
b.chunks = b.chunks[:end]
b.r = 0
}
}
return ntotal, nil
}
func (b *dataBuffer) bytesFromFirstChunk() []byte {
if len(b.chunks) == 1 {
return b.chunks[0][b.r:b.w]
}
return b.chunks[0][b.r:]
}
// Len returns the number of bytes of the unread portion of the buffer.
func (b *dataBuffer) Len() int {
return b.size
}
// Write appends p to the buffer.
func (b *dataBuffer) Write(p []byte) (int, error) {
ntotal := len(p)
for len(p) > 0 {
// If the last chunk is empty, allocate a new chunk. Try to allocate
// enough to fully copy p plus any additional bytes we expect to
// receive. However, this may allocate less than len(p).
want := int64(len(p))
if b.expected > want {
want = b.expected
}
chunk := b.lastChunkOrAlloc(want)
n := copy(chunk[b.w:], p)
p = p[n:]
b.w += n
b.size += n
b.expected -= int64(n)
}
return ntotal, nil
}
func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {
if len(b.chunks) != 0 {
last := b.chunks[len(b.chunks)-1]
if b.w < len(last) {
return last
}
}
chunk := getDataBufferChunk(want)
b.chunks = append(b.chunks, chunk)
b.w = 0
return chunk
}
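An in-package sketch of how the chunked buffer behaves (the demo function is hypothetical; "fmt" is already imported by this file): DATA payloads are written in, possibly across several pooled chunks, and the reader drains them in order.

func demoDataBuffer() {
    var b dataBuffer
    b.expected = 32 << 10 // hint that more bytes are coming, so larger chunks are picked

    b.Write([]byte("hello, "))
    b.Write([]byte("world"))

    out := make([]byte, b.Len())
    b.Read(out)
    fmt.Println(string(out)) // hello, world
}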

View File

@ -87,13 +87,16 @@ type goAwayFlowError struct{}
func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
// connError represents an HTTP/2 ConnectionError error code, along
// with a string (for debugging) explaining why.
//
// Errors of this type are only returned by the frame parser functions
// and converted into ConnectionError(Code), after stashing away
// the Reason into the Framer's errDetail field, accessible via
// the (*Framer).ErrorDetail method.
type connError struct {
Code ErrCode // the ConnectionError error code
Reason string // additional reason
}
func (e connError) Error() string {

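A sketch of what the new doc comment describes, from the caller's side; the package and function names here are illustrative, and fr is assumed to be an already constructed Framer:

package framerdemo

import (
    "log"

    "golang.org/x/net/http2"
)

// readOne surfaces the detailed reason the frame parser stashed away
// when ReadFrame reports a connection-level error.
func readOne(fr *http2.Framer) (http2.Frame, error) {
    f, err := fr.ReadFrame()
    if err != nil {
        if _, ok := err.(http2.ConnectionError); ok {
            log.Printf("connection error: %v (detail: %v)", err, fr.ErrorDetail())
        }
        return nil, err
    }
    return f, nil
}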
View File

@ -1,60 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"errors"
)
// fixedBuffer is an io.ReadWriter backed by a fixed size buffer.
// It never allocates, but moves old data as new data is written.
type fixedBuffer struct {
buf []byte
r, w int
}
var (
errReadEmpty = errors.New("read from empty fixedBuffer")
errWriteFull = errors.New("write on full fixedBuffer")
)
// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *fixedBuffer) Read(p []byte) (n int, err error) {
if b.r == b.w {
return 0, errReadEmpty
}
n = copy(p, b.buf[b.r:b.w])
b.r += n
if b.r == b.w {
b.r = 0
b.w = 0
}
return n, nil
}
// Len returns the number of bytes of the unread portion of the buffer.
func (b *fixedBuffer) Len() int {
return b.w - b.r
}
// Write copies bytes from p into the buffer.
// It is an error to write more data than the buffer can hold.
func (b *fixedBuffer) Write(p []byte) (n int, err error) {
// Slide existing data to beginning.
if b.r > 0 && len(p) > len(b.buf)-b.w {
copy(b.buf, b.buf[b.r:b.w])
b.w -= b.r
b.r = 0
}
// Write new data.
n = copy(b.buf[b.w:], p)
b.w += n
if n < len(p) {
err = errWriteFull
}
return n, err
}

View File

@ -122,7 +122,7 @@ var flagName = map[FrameType]map[Flags]string{
// a frameParser parses a frame given its FrameHeader and payload
// bytes. The length of payload will always equal fh.Length (which
// might be 0).
type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error)
var frameParsers = map[FrameType]frameParser{
FrameData: parseDataFrame,
@ -323,6 +323,8 @@ type Framer struct {
debugFramerBuf *bytes.Buffer
debugReadLoggerf func(string, ...interface{})
debugWriteLoggerf func(string, ...interface{})
frameCache *frameCache // nil if frames aren't reused (default)
}
func (fr *Framer) maxHeaderListSize() uint32 {
@ -398,6 +400,27 @@ const (
maxFrameSize = 1<<24 - 1
)
// SetReuseFrames allows the Framer to reuse Frames.
// If called on a Framer, Frames returned by calls to ReadFrame are only
// valid until the next call to ReadFrame.
func (fr *Framer) SetReuseFrames() {
if fr.frameCache != nil {
return
}
fr.frameCache = &frameCache{}
}
type frameCache struct {
dataFrame DataFrame
}
func (fc *frameCache) getDataFrame() *DataFrame {
if fc == nil {
return &DataFrame{}
}
return &fc.dataFrame
}
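A sketch of the caller-side contract for SetReuseFrames (illustrative package and function; handle is whatever consumes the payload): because the returned DataFrame is recycled, its bytes must be copied before the next ReadFrame.

package framerdemo

import (
    "io"

    "golang.org/x/net/http2"
)

// readLoop reads frames with frame reuse enabled and hands copies of
// DATA payloads to handle.
func readLoop(rw io.ReadWriter, handle func([]byte)) error {
    fr := http2.NewFramer(rw, rw)
    fr.SetReuseFrames()
    for {
        f, err := fr.ReadFrame()
        if err != nil {
            return err
        }
        if df, ok := f.(*http2.DataFrame); ok {
            handle(append([]byte(nil), df.Data()...)) // copy: df is reused on the next ReadFrame
        }
    }
}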
// NewFramer returns a Framer that writes frames to w and reads them from r.
func NewFramer(w io.Writer, r io.Reader) *Framer {
fr := &Framer{
@ -477,7 +500,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
if _, err := io.ReadFull(fr.r, payload); err != nil {
return nil, err
}
f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload)
if err != nil {
if ce, ok := err.(connError); ok {
return nil, fr.connError(ce.Code, ce.Reason)
@ -565,7 +588,7 @@ func (f *DataFrame) Data() []byte {
return f.data
}
func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
if fh.StreamID == 0 {
// DATA frames MUST be associated with a stream. If a
// DATA frame is received whose stream identifier
@ -574,9 +597,9 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
// PROTOCOL_ERROR.
return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
}
f := fc.getDataFrame()
f.FrameHeader = fh
var padSize byte
if fh.Flags.Has(FlagDataPadded) {
var err error
@ -600,6 +623,7 @@ var (
errStreamID = errors.New("invalid stream ID") errStreamID = errors.New("invalid stream ID")
errDepStreamID = errors.New("invalid dependent stream ID") errDepStreamID = errors.New("invalid dependent stream ID")
errPadLength = errors.New("pad length too large") errPadLength = errors.New("pad length too large")
errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled")
) )
func validStreamIDOrZero(streamID uint32) bool { func validStreamIDOrZero(streamID uint32) bool {
@ -623,6 +647,7 @@ func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
//
// If pad is nil, the padding bit is not sent.
// The length of pad must not exceed 255 bytes.
// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility not to violate the maximum frame size
@ -631,9 +656,19 @@ func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []by
if !validStreamID(streamID) && !f.AllowIllegalWrites {
return errStreamID
}
if len(pad) > 0 {
if len(pad) > 255 {
return errPadLength
}
if !f.AllowIllegalWrites {
for _, b := range pad {
if b != 0 {
// "Padding octets MUST be set to zero when sending."
return errPadBytes
}
}
}
}
var flags Flags
if endStream {
flags |= FlagDataEndStream
@ -660,7 +695,7 @@ type SettingsFrame struct {
p []byte
}
func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
// When this (ACK 0x1) bit is set, the payload of the
// SETTINGS frame MUST be empty. Receipt of a
@ -762,7 +797,7 @@ type PingFrame struct {
func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
if len(payload) != 8 {
return nil, ConnectionError(ErrCodeFrameSize)
}
@ -802,7 +837,7 @@ func (f *GoAwayFrame) DebugData() []byte {
return f.debugData return f.debugData
} }
func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) { func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if fh.StreamID != 0 { if fh.StreamID != 0 {
return nil, ConnectionError(ErrCodeProtocol) return nil, ConnectionError(ErrCodeProtocol)
} }
@ -842,7 +877,7 @@ func (f *UnknownFrame) Payload() []byte {
return f.p return f.p
} }
func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) { func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
return &UnknownFrame{fh, p}, nil return &UnknownFrame{fh, p}, nil
} }
@ -853,7 +888,7 @@ type WindowUpdateFrame struct {
Increment uint32 // never read with high bit set Increment uint32 // never read with high bit set
} }
func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) { func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if len(p) != 4 { if len(p) != 4 {
return nil, ConnectionError(ErrCodeFrameSize) return nil, ConnectionError(ErrCodeFrameSize)
} }
@ -918,7 +953,7 @@ func (f *HeadersFrame) HasPriority() bool {
return f.FrameHeader.Flags.Has(FlagHeadersPriority) return f.FrameHeader.Flags.Has(FlagHeadersPriority)
} }
func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) { func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
hf := &HeadersFrame{ hf := &HeadersFrame{
FrameHeader: fh, FrameHeader: fh,
} }
@ -1055,7 +1090,7 @@ func (p PriorityParam) IsZero() bool {
return p == PriorityParam{} return p == PriorityParam{}
} }
func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) { func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
if fh.StreamID == 0 { if fh.StreamID == 0 {
return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
} }
@ -1102,7 +1137,7 @@ type RSTStreamFrame struct {
ErrCode ErrCode ErrCode ErrCode
} }
func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) { func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if len(p) != 4 { if len(p) != 4 {
return nil, ConnectionError(ErrCodeFrameSize) return nil, ConnectionError(ErrCodeFrameSize)
} }
@ -1132,7 +1167,7 @@ type ContinuationFrame struct {
headerFragBuf []byte headerFragBuf []byte
} }
func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) { func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if fh.StreamID == 0 { if fh.StreamID == 0 {
return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
} }
@ -1182,7 +1217,7 @@ func (f *PushPromiseFrame) HeadersEnded() bool {
return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
} }
func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) { func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
pp := &PushPromiseFrame{ pp := &PushPromiseFrame{
FrameHeader: fh, FrameHeader: fh,
} }
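All of the parse functions now accept a *frameCache so the Framer can recycle a DataFrame on the hot path instead of allocating one per DATA frame. The caller-visible rule is unchanged but worth restating: a Frame returned by ReadFrame is only valid until the next ReadFrame call, so data to be retained must be copied. A small round-trip sketch (the buffer and stream ID are illustrative):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, &buf)
	if err := fr.WriteData(1, true, []byte("payload")); err != nil {
		fmt.Println("write failed:", err)
		return
	}

	// The returned Frame (here a *DataFrame, possibly served from the
	// frameCache) is only valid until the next ReadFrame call, so copy
	// anything that needs to outlive it.
	f, err := fr.ReadFrame()
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	if df, ok := f.(*http2.DataFrame); ok {
		data := append([]byte(nil), df.Data()...)
		fmt.Printf("stream %d carried %q\n", df.StreamID, data)
	}
}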
View File
@ -7,7 +7,6 @@
package http2 package http2
import ( import (
"crypto/tls"
"net/http" "net/http"
"time" "time"
) )
@ -15,29 +14,3 @@ import (
func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
return t1.ExpectContinueTimeout return t1.ExpectContinueTimeout
} }
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
func isBadCipher(cipher uint16) bool {
switch cipher {
case tls.TLS_RSA_WITH_RC4_128_SHA,
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
// Reject cipher suites from Appendix A.
// "This list includes those cipher suites that do not
// offer an ephemeral key exchange and those that are
// based on the TLS null, stream or block cipher type"
return true
default:
return false
}
}
View File
@ -12,7 +12,11 @@ import (
"net/http" "net/http"
) )
func cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() } func cloneTLSConfig(c *tls.Config) *tls.Config {
c2 := c.Clone()
c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264
return c2
}
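Go 1.8's tls.Config.Clone drops the GetClientCertificate callback (golang.org/issue/19264), which is why the Go 1.8 shim re-assigns it after cloning. The same workaround in isolation, as a sketch:

package main

import (
	"crypto/tls"
	"fmt"
)

// cloneTLSConfigCompat mirrors the fix above: Clone the config, then restore
// GetClientCertificate, which Go 1.8's Clone does not copy (issue 19264).
func cloneTLSConfigCompat(c *tls.Config) *tls.Config {
	c2 := c.Clone()
	c2.GetClientCertificate = c.GetClientCertificate
	return c2
}

func main() {
	orig := &tls.Config{
		GetClientCertificate: func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
			return &tls.Certificate{}, nil
		},
	}
	clone := cloneTLSConfigCompat(orig)
	fmt.Println("callback preserved:", clone.GetClientCertificate != nil)
}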
var _ http.Pusher = (*responseWriter)(nil) var _ http.Pusher = (*responseWriter)(nil)
@ -48,3 +52,5 @@ func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
func reqBodyIsNoBody(body io.ReadCloser) bool { func reqBodyIsNoBody(body io.ReadCloser) bool {
return body == http.NoBody return body == http.NoBody
} }
func go18httpNoBody() io.ReadCloser { return http.NoBody } // for tests only
16
vendor/golang.org/x/net/http2/go19.go generated vendored Normal file
View File
@ -0,0 +1,16 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.9
package http2
import (
"net/http"
)
func configureServer19(s *http.Server, conf *Server) error {
s.RegisterOnShutdown(conf.state.startGracefulShutdown)
return nil
}
View File
@ -39,6 +39,7 @@ func NewEncoder(w io.Writer) *Encoder {
tableSizeUpdate: false, tableSizeUpdate: false,
w: w, w: w,
} }
e.dynTab.table.init()
e.dynTab.setMaxSize(initialHeaderTableSize) e.dynTab.setMaxSize(initialHeaderTableSize)
return e return e
} }
@ -88,29 +89,17 @@ func (e *Encoder) WriteField(f HeaderField) error {
// only name matches, i points to that index and nameValueMatch // only name matches, i points to that index and nameValueMatch
// becomes false. // becomes false.
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
for idx, hf := range staticTable { i, nameValueMatch = staticTable.search(f)
if !constantTimeStringCompare(hf.Name, f.Name) { if nameValueMatch {
continue return i, true
}
if i == 0 {
i = uint64(idx + 1)
}
if f.Sensitive {
continue
}
if !constantTimeStringCompare(hf.Value, f.Value) {
continue
}
i = uint64(idx + 1)
nameValueMatch = true
return
} }
j, nameValueMatch := e.dynTab.search(f) j, nameValueMatch := e.dynTab.table.search(f)
if nameValueMatch || (i == 0 && j != 0) { if nameValueMatch || (i == 0 && j != 0) {
i = j + uint64(len(staticTable)) return j + uint64(staticTable.len()), nameValueMatch
} }
return
return i, false
} }
// SetMaxDynamicTableSize changes the dynamic header table size to v. // SetMaxDynamicTableSize changes the dynamic header table size to v.
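With this change the encoder looks up the static table through the shared headerFieldTable maps first and only then consults the dynamic table, offsetting dynamic matches by staticTable.len(). Nothing changes for callers; a minimal encoding sketch (the header names are illustrative):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)

	// ":method: GET" is entry 2 of the static table, so it encodes as a
	// single indexed byte; the custom header is added to the dynamic table.
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	enc.WriteField(hpack.HeaderField{Name: "x-example", Value: "1"})

	fmt.Printf("encoded %d bytes: % x\n", buf.Len(), buf.Bytes())
}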
View File
@ -102,6 +102,7 @@ func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decod
emit: emitFunc, emit: emitFunc,
emitEnabled: true, emitEnabled: true,
} }
d.dynTab.table.init()
d.dynTab.allowedMaxSize = maxDynamicTableSize d.dynTab.allowedMaxSize = maxDynamicTableSize
d.dynTab.setMaxSize(maxDynamicTableSize) d.dynTab.setMaxSize(maxDynamicTableSize)
return d return d
@ -154,12 +155,9 @@ func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
} }
type dynamicTable struct { type dynamicTable struct {
// ents is the FIFO described at
// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
// The newest (low index) is append at the end, and items are table headerFieldTable
// evicted from the front. size uint32 // in bytes
ents []HeaderField
size uint32
maxSize uint32 // current maxSize maxSize uint32 // current maxSize
allowedMaxSize uint32 // maxSize may go up to this, inclusive allowedMaxSize uint32 // maxSize may go up to this, inclusive
} }
@ -169,95 +167,45 @@ func (dt *dynamicTable) setMaxSize(v uint32) {
dt.evict() dt.evict()
} }
// TODO: change dynamicTable to be a struct with a slice and a size int field,
// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
//
//
// Then make add increment the size. maybe the max size should move from Decoder to
// dynamicTable and add should return an ok bool if there was enough space.
//
// Later we'll need a remove operation on dynamicTable.
func (dt *dynamicTable) add(f HeaderField) { func (dt *dynamicTable) add(f HeaderField) {
dt.ents = append(dt.ents, f) dt.table.addEntry(f)
dt.size += f.Size() dt.size += f.Size()
dt.evict() dt.evict()
} }
// If we're too big, evict old stuff (front of the slice) // If we're too big, evict old stuff.
func (dt *dynamicTable) evict() { func (dt *dynamicTable) evict() {
base := dt.ents // keep base pointer of slice var n int
for dt.size > dt.maxSize { for dt.size > dt.maxSize && n < dt.table.len() {
dt.size -= dt.ents[0].Size() dt.size -= dt.table.ents[n].Size()
dt.ents = dt.ents[1:] n++
} }
dt.table.evictOldest(n)
// Shift slice contents down if we evicted things.
if len(dt.ents) != len(base) {
copy(base, dt.ents)
dt.ents = base[:len(dt.ents)]
}
}
// constantTimeStringCompare compares string a and b in a constant
// time manner.
func constantTimeStringCompare(a, b string) bool {
if len(a) != len(b) {
return false
}
c := byte(0)
for i := 0; i < len(a); i++ {
c |= a[i] ^ b[i]
}
return c == 0
}
// Search searches f in the table. The return value i is 0 if there is
// no name match. If there is name match or name/value match, i is the
// index of that entry (1-based). If both name and value match,
// nameValueMatch becomes true.
func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
l := len(dt.ents)
for j := l - 1; j >= 0; j-- {
ent := dt.ents[j]
if !constantTimeStringCompare(ent.Name, f.Name) {
continue
}
if i == 0 {
i = uint64(l - j)
}
if f.Sensitive {
continue
}
if !constantTimeStringCompare(ent.Value, f.Value) {
continue
}
i = uint64(l - j)
nameValueMatch = true
return
}
return
} }
func (d *Decoder) maxTableIndex() int { func (d *Decoder) maxTableIndex() int {
return len(d.dynTab.ents) + len(staticTable) // This should never overflow. RFC 7540 Section 6.5.2 limits the size of
// the dynamic table to 2^32 bytes, where each entry will occupy more than
// one byte. Further, the staticTable has a fixed, small length.
return d.dynTab.table.len() + staticTable.len()
} }
func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
if i < 1 { // See Section 2.3.3.
if i == 0 {
return return
} }
if i <= uint64(staticTable.len()) {
return staticTable.ents[i-1], true
}
if i > uint64(d.maxTableIndex()) { if i > uint64(d.maxTableIndex()) {
return return
} }
if i <= uint64(len(staticTable)) { // In the dynamic table, newer entries have lower indices.
return staticTable[i-1], true // However, dt.ents[0] is the oldest entry. Hence, dt.ents is
} // the reversed dynamic table.
dents := d.dynTab.ents dt := d.dynTab.table
return dents[len(dents)-(int(i)-len(staticTable))], true return dt.ents[dt.len()-(int(i)-staticTable.len())], true
} }
// Decode decodes an entire block. // Decode decodes an entire block.
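On the decoding side, at() now resolves indices through the same headerFieldTable: indices 1 through staticTable.len() are served from the static table, and higher indices from the (reversed) dynamic table. Caller usage is unchanged; a round-trip sketch using DecodeFull, where 4096 is the usual SETTINGS_HEADER_TABLE_SIZE default:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
	enc.WriteField(hpack.HeaderField{Name: "content-type", Value: "text/plain"})

	// The emit callback may be nil when DecodeFull is used.
	dec := hpack.NewDecoder(4096, nil)
	fields, err := dec.DecodeFull(buf.Bytes())
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	for _, f := range fields {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	}
}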
View File
@ -4,73 +4,200 @@
package hpack package hpack
func pair(name, value string) HeaderField { import (
return HeaderField{Name: name, Value: value} "fmt"
)
// headerFieldTable implements a list of HeaderFields.
// This is used to implement the static and dynamic tables.
type headerFieldTable struct {
// For static tables, entries are never evicted.
//
// For dynamic tables, entries are evicted from ents[0] and added to the end.
// Each entry has a unique id that starts at one and increments for each
// entry that is added. This unique id is stable across evictions, meaning
// it can be used as a pointer to a specific entry. As in hpack, unique ids
// are 1-based. The unique id for ents[k] is k + evictCount + 1.
//
// Zero is not a valid unique id.
//
// evictCount should not overflow in any remotely practical situation. In
// practice, we will have one dynamic table per HTTP/2 connection. If we
// assume a very powerful server that handles 1M QPS per connection and each
// request adds (then evicts) 100 entries from the table, it would still take
// 2M years for evictCount to overflow.
ents []HeaderField
evictCount uint64
// byName maps a HeaderField name to the unique id of the newest entry with
// the same name. See above for a definition of "unique id".
byName map[string]uint64
// byNameValue maps a HeaderField name/value pair to the unique id of the newest
// entry with the same name and value. See above for a definition of "unique id".
byNameValue map[pairNameValue]uint64
}
type pairNameValue struct {
name, value string
}
func (t *headerFieldTable) init() {
t.byName = make(map[string]uint64)
t.byNameValue = make(map[pairNameValue]uint64)
}
// len reports the number of entries in the table.
func (t *headerFieldTable) len() int {
return len(t.ents)
}
// addEntry adds a new entry.
func (t *headerFieldTable) addEntry(f HeaderField) {
id := uint64(t.len()) + t.evictCount + 1
t.byName[f.Name] = id
t.byNameValue[pairNameValue{f.Name, f.Value}] = id
t.ents = append(t.ents, f)
}
// evictOldest evicts the n oldest entries in the table.
func (t *headerFieldTable) evictOldest(n int) {
if n > t.len() {
panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len()))
}
for k := 0; k < n; k++ {
f := t.ents[k]
id := t.evictCount + uint64(k) + 1
if t.byName[f.Name] == id {
delete(t.byName, f.Name)
}
if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id {
delete(t.byNameValue, p)
}
}
copy(t.ents, t.ents[n:])
for k := t.len() - n; k < t.len(); k++ {
t.ents[k] = HeaderField{} // so strings can be garbage collected
}
t.ents = t.ents[:t.len()-n]
if t.evictCount+uint64(n) < t.evictCount {
panic("evictCount overflow")
}
t.evictCount += uint64(n)
}
// search finds f in the table. If there is no match, i is 0.
// If both name and value match, i is the matched index and nameValueMatch
// becomes true. If only name matches, i points to that index and
// nameValueMatch becomes false.
//
// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says
// that index 1 should be the newest entry, but t.ents[0] is the oldest entry,
// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
// table, the return value i actually refers to the entry t.ents[t.len()-i].
//
// All tables are assumed to be a dynamic tables except for the global
// staticTable pointer.
//
// See Section 2.3.3.
func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
if !f.Sensitive {
if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 {
return t.idToIndex(id), true
}
}
if id := t.byName[f.Name]; id != 0 {
return t.idToIndex(id), false
}
return 0, false
}
// idToIndex converts a unique id to an HPACK index.
// See Section 2.3.3.
func (t *headerFieldTable) idToIndex(id uint64) uint64 {
if id <= t.evictCount {
panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount))
}
k := id - t.evictCount - 1 // convert id to an index t.ents[k]
if t != staticTable {
return uint64(t.len()) - k // dynamic table
}
return k + 1
} }
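The arithmetic above is easiest to see with concrete numbers: the entry at ents[k] after e evictions has unique id k + e + 1, and converting an id back to a 1-based HPACK index is id - e for the static table and len - (id - e - 1) for a dynamic table, where the newest entry is index 1. A self-contained sketch of just that conversion:

package main

import "fmt"

// For the entry at ents[k] after e evictions, the unique id is k + e + 1.
// Converting back to a 1-based HPACK index (per idToIndex above):
//   static table:  index = id - e              (oldest entry is index 1)
//   dynamic table: index = len - (id - e - 1)  (newest entry is index 1)
func staticIndex(id, evictCount uint64) uint64 { return id - evictCount }

func dynamicIndex(id, evictCount uint64, length int) uint64 {
	k := id - evictCount - 1
	return uint64(length) - k
}

func main() {
	// A dynamic table holding 3 live entries after 5 evictions has ids 6, 7, 8.
	for id := uint64(6); id <= 8; id++ {
		fmt.Printf("id %d -> HPACK index %d\n", id, dynamicIndex(id, 5, 3))
	}
	// ids 6, 7, 8 map to indices 3, 2, 1; the newest entry is index 1.
}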
// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B // http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
var staticTable = [...]HeaderField{ var staticTable = newStaticTable()
pair(":authority", ""), // index 1 (1-based) var staticTableEntries = [...]HeaderField{
pair(":method", "GET"), {Name: ":authority"},
pair(":method", "POST"), {Name: ":method", Value: "GET"},
pair(":path", "/"), {Name: ":method", Value: "POST"},
pair(":path", "/index.html"), {Name: ":path", Value: "/"},
pair(":scheme", "http"), {Name: ":path", Value: "/index.html"},
pair(":scheme", "https"), {Name: ":scheme", Value: "http"},
pair(":status", "200"), {Name: ":scheme", Value: "https"},
pair(":status", "204"), {Name: ":status", Value: "200"},
pair(":status", "206"), {Name: ":status", Value: "204"},
pair(":status", "304"), {Name: ":status", Value: "206"},
pair(":status", "400"), {Name: ":status", Value: "304"},
pair(":status", "404"), {Name: ":status", Value: "400"},
pair(":status", "500"), {Name: ":status", Value: "404"},
pair("accept-charset", ""), {Name: ":status", Value: "500"},
pair("accept-encoding", "gzip, deflate"), {Name: "accept-charset"},
pair("accept-language", ""), {Name: "accept-encoding", Value: "gzip, deflate"},
pair("accept-ranges", ""), {Name: "accept-language"},
pair("accept", ""), {Name: "accept-ranges"},
pair("access-control-allow-origin", ""), {Name: "accept"},
pair("age", ""), {Name: "access-control-allow-origin"},
pair("allow", ""), {Name: "age"},
pair("authorization", ""), {Name: "allow"},
pair("cache-control", ""), {Name: "authorization"},
pair("content-disposition", ""), {Name: "cache-control"},
pair("content-encoding", ""), {Name: "content-disposition"},
pair("content-language", ""), {Name: "content-encoding"},
pair("content-length", ""), {Name: "content-language"},
pair("content-location", ""), {Name: "content-length"},
pair("content-range", ""), {Name: "content-location"},
pair("content-type", ""), {Name: "content-range"},
pair("cookie", ""), {Name: "content-type"},
pair("date", ""), {Name: "cookie"},
pair("etag", ""), {Name: "date"},
pair("expect", ""), {Name: "etag"},
pair("expires", ""), {Name: "expect"},
pair("from", ""), {Name: "expires"},
pair("host", ""), {Name: "from"},
pair("if-match", ""), {Name: "host"},
pair("if-modified-since", ""), {Name: "if-match"},
pair("if-none-match", ""), {Name: "if-modified-since"},
pair("if-range", ""), {Name: "if-none-match"},
pair("if-unmodified-since", ""), {Name: "if-range"},
pair("last-modified", ""), {Name: "if-unmodified-since"},
pair("link", ""), {Name: "last-modified"},
pair("location", ""), {Name: "link"},
pair("max-forwards", ""), {Name: "location"},
pair("proxy-authenticate", ""), {Name: "max-forwards"},
pair("proxy-authorization", ""), {Name: "proxy-authenticate"},
pair("range", ""), {Name: "proxy-authorization"},
pair("referer", ""), {Name: "range"},
pair("refresh", ""), {Name: "referer"},
pair("retry-after", ""), {Name: "refresh"},
pair("server", ""), {Name: "retry-after"},
pair("set-cookie", ""), {Name: "server"},
pair("strict-transport-security", ""), {Name: "set-cookie"},
pair("transfer-encoding", ""), {Name: "strict-transport-security"},
pair("user-agent", ""), {Name: "transfer-encoding"},
pair("vary", ""), {Name: "user-agent"},
pair("via", ""), {Name: "vary"},
pair("www-authenticate", ""), {Name: "via"},
{Name: "www-authenticate"},
}
func newStaticTable() *headerFieldTable {
t := &headerFieldTable{}
t.init()
for _, e := range staticTableEntries[:] {
t.addEntry(e)
}
return t
} }
var huffmanCodes = [256]uint32{ var huffmanCodes = [256]uint32{
View File
@ -376,12 +376,16 @@ func (s *sorter) SortStrings(ss []string) {
// validPseudoPath reports whether v is a valid :path pseudo-header // validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either: // value. It must be either:
// //
// *) a non-empty string starting with '/', but not with with "//", // *) a non-empty string starting with '/'
// *) the string '*', for OPTIONS requests. // *) the string '*', for OPTIONS requests.
// //
// For now this is only used a quick check for deciding when to clean // For now this is only used a quick check for deciding when to clean
// up Opaque URLs before sending requests from the Transport. // up Opaque URLs before sending requests from the Transport.
// See golang.org/issue/16847 // See golang.org/issue/16847
//
// We used to enforce that the path also didn't start with "//", but
// Google's GFE accepts such paths and Chrome sends them, so ignore
// that part of the spec. See golang.org/issue/19103.
func validPseudoPath(v string) bool { func validPseudoPath(v string) bool {
return (len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/')) || v == "*" return (len(v) > 0 && v[0] == '/') || v == "*"
} }
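The relaxed check accepts any non-empty path starting with '/' (including a "//" prefix, per golang.org/issue/19103) plus the literal "*". A quick sketch showing which values pass:

package main

import "fmt"

// validPseudoPath, per the relaxed rule above: any non-empty path beginning
// with '/', or the literal "*" used by OPTIONS requests.
func validPseudoPath(v string) bool {
	return (len(v) > 0 && v[0] == '/') || v == "*"
}

func main() {
	for _, v := range []string{"/", "/index.html", "//double", "*", "", "nope"} {
		fmt.Printf("%-14q -> %v\n", v, validPseudoPath(v))
	}
	// "//double" is now accepted; "" and "nope" are still rejected.
}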
View File
@ -7,7 +7,6 @@
package http2 package http2
import ( import (
"crypto/tls"
"net/http" "net/http"
"time" "time"
) )
@ -20,27 +19,3 @@ func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
return 0 return 0
} }
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
func isBadCipher(cipher uint16) bool {
switch cipher {
case tls.TLS_RSA_WITH_RC4_128_SHA,
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
// Reject cipher suites from Appendix A.
// "This list includes those cipher suites that do not
// offer an ephemeral key exchange and those that are
// based on the TLS null, stream or block cipher type"
return true
default:
return false
}
}
View File
@ -25,3 +25,5 @@ func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
} }
func reqBodyIsNoBody(io.ReadCloser) bool { return false } func reqBodyIsNoBody(io.ReadCloser) bool { return false }
func go18httpNoBody() io.ReadCloser { return nil } // for tests only
16
vendor/golang.org/x/net/http2/not_go19.go generated vendored Normal file
View File
@ -0,0 +1,16 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.9
package http2
import (
"net/http"
)
func configureServer19(s *http.Server, conf *Server) error {
// not supported prior to go1.9
return nil
}
View File
@ -16,7 +16,7 @@ import (
type pipe struct { type pipe struct {
mu sync.Mutex mu sync.Mutex
c sync.Cond // c.L lazily initialized to &p.mu c sync.Cond // c.L lazily initialized to &p.mu
b pipeBuffer b pipeBuffer // nil when done reading
err error // read error once empty. non-nil means closed. err error // read error once empty. non-nil means closed.
breakErr error // immediate read error (caller doesn't see rest of b) breakErr error // immediate read error (caller doesn't see rest of b)
donec chan struct{} // closed on error donec chan struct{} // closed on error
@ -32,6 +32,9 @@ type pipeBuffer interface {
func (p *pipe) Len() int { func (p *pipe) Len() int {
p.mu.Lock() p.mu.Lock()
defer p.mu.Unlock() defer p.mu.Unlock()
if p.b == nil {
return 0
}
return p.b.Len() return p.b.Len()
} }
@ -47,7 +50,7 @@ func (p *pipe) Read(d []byte) (n int, err error) {
if p.breakErr != nil { if p.breakErr != nil {
return 0, p.breakErr return 0, p.breakErr
} }
if p.b.Len() > 0 { if p.b != nil && p.b.Len() > 0 {
return p.b.Read(d) return p.b.Read(d)
} }
if p.err != nil { if p.err != nil {
@ -55,6 +58,7 @@ func (p *pipe) Read(d []byte) (n int, err error) {
p.readFn() // e.g. copy trailers p.readFn() // e.g. copy trailers
p.readFn = nil // not sticky like p.err p.readFn = nil // not sticky like p.err
} }
p.b = nil
return 0, p.err return 0, p.err
} }
p.c.Wait() p.c.Wait()
@ -75,6 +79,9 @@ func (p *pipe) Write(d []byte) (n int, err error) {
if p.err != nil { if p.err != nil {
return 0, errClosedPipeWrite return 0, errClosedPipeWrite
} }
if p.breakErr != nil {
return len(d), nil // discard when there is no reader
}
return p.b.Write(d) return p.b.Write(d)
} }
@ -109,6 +116,9 @@ func (p *pipe) closeWithError(dst *error, err error, fn func()) {
return return
} }
p.readFn = fn p.readFn = fn
if dst == &p.breakErr {
p.b = nil
}
*dst = err *dst = err
p.closeDoneLocked() p.closeDoneLocked()
} }
View File
@ -110,9 +110,41 @@ type Server struct {
// activity for the purposes of IdleTimeout. // activity for the purposes of IdleTimeout.
IdleTimeout time.Duration IdleTimeout time.Duration
// MaxUploadBufferPerConnection is the size of the initial flow
// control window for each connections. The HTTP/2 spec does not
// allow this to be smaller than 65535 or larger than 2^32-1.
// If the value is outside this range, a default value will be
// used instead.
MaxUploadBufferPerConnection int32
// MaxUploadBufferPerStream is the size of the initial flow control
// window for each stream. The HTTP/2 spec does not allow this to
// be larger than 2^32-1. If the value is zero or larger than the
// maximum, a default value will be used instead.
MaxUploadBufferPerStream int32
// NewWriteScheduler constructs a write scheduler for a connection. // NewWriteScheduler constructs a write scheduler for a connection.
// If nil, a default scheduler is chosen. // If nil, a default scheduler is chosen.
NewWriteScheduler func() WriteScheduler NewWriteScheduler func() WriteScheduler
// Internal state. This is a pointer (rather than embedded directly)
// so that we don't embed a Mutex in this struct, which will make the
// struct non-copyable, which might break some callers.
state *serverInternalState
}
func (s *Server) initialConnRecvWindowSize() int32 {
if s.MaxUploadBufferPerConnection > initialWindowSize {
return s.MaxUploadBufferPerConnection
}
return 1 << 20
}
func (s *Server) initialStreamRecvWindowSize() int32 {
if s.MaxUploadBufferPerStream > 0 {
return s.MaxUploadBufferPerStream
}
return 1 << 20
} }
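MaxUploadBufferPerConnection and MaxUploadBufferPerStream are the new knobs behind these two helpers; when left at zero the package picks 1 MiB for each. A configuration sketch (the sizes and the cert/key paths are placeholders):

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}

	// Allow up to 4 MiB of in-flight upload data per connection and 2 MiB per
	// stream; when these are zero, the helpers above fall back to 1 MiB each.
	h2 := &http2.Server{
		MaxUploadBufferPerConnection: 4 << 20,
		MaxUploadBufferPerStream:     2 << 20,
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	// cert.pem and key.pem are placeholder paths.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}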
func (s *Server) maxReadFrameSize() uint32 { func (s *Server) maxReadFrameSize() uint32 {
@ -129,6 +161,40 @@ func (s *Server) maxConcurrentStreams() uint32 {
return defaultMaxStreams return defaultMaxStreams
} }
type serverInternalState struct {
mu sync.Mutex
activeConns map[*serverConn]struct{}
}
func (s *serverInternalState) registerConn(sc *serverConn) {
if s == nil {
return // if the Server was used without calling ConfigureServer
}
s.mu.Lock()
s.activeConns[sc] = struct{}{}
s.mu.Unlock()
}
func (s *serverInternalState) unregisterConn(sc *serverConn) {
if s == nil {
return // if the Server was used without calling ConfigureServer
}
s.mu.Lock()
delete(s.activeConns, sc)
s.mu.Unlock()
}
func (s *serverInternalState) startGracefulShutdown() {
if s == nil {
return // if the Server was used without calling ConfigureServer
}
s.mu.Lock()
for sc := range s.activeConns {
sc.startGracefulShutdown()
}
s.mu.Unlock()
}
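startGracefulShutdown is the callback that configureServer19 registers with http.Server.RegisterOnShutdown on Go 1.9, so a plain srv.Shutdown now also drains active HTTP/2 connections. A wiring sketch (Go 1.9+; the cert/key paths and the timeout are placeholders):

package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443"}
	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
		log.Fatal(err)
	}
	go srv.ListenAndServeTLS("cert.pem", "key.pem") // placeholder cert/key paths

	// Shutdown stops the listeners and, through the RegisterOnShutdown hook
	// installed by ConfigureServer on Go 1.9+, asks every active HTTP/2
	// connection to send GOAWAY and let its open streams finish.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Printf("shutdown: %v", err)
	}
}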
// ConfigureServer adds HTTP/2 support to a net/http Server. // ConfigureServer adds HTTP/2 support to a net/http Server.
// //
// The configuration conf may be nil. // The configuration conf may be nil.
@ -141,9 +207,13 @@ func ConfigureServer(s *http.Server, conf *Server) error {
if conf == nil { if conf == nil {
conf = new(Server) conf = new(Server)
} }
conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
if err := configureServer18(s, conf); err != nil { if err := configureServer18(s, conf); err != nil {
return err return err
} }
if err := configureServer19(s, conf); err != nil {
return err
}
if s.TLSConfig == nil { if s.TLSConfig == nil {
s.TLSConfig = new(tls.Config) s.TLSConfig = new(tls.Config)
@ -265,25 +335,27 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
streams: make(map[uint32]*stream), streams: make(map[uint32]*stream),
readFrameCh: make(chan readFrameResult), readFrameCh: make(chan readFrameResult),
wantWriteFrameCh: make(chan FrameWriteRequest, 8), wantWriteFrameCh: make(chan FrameWriteRequest, 8),
wantStartPushCh: make(chan startPushRequest, 8), serveMsgCh: make(chan interface{}, 8),
wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
doneServing: make(chan struct{}), doneServing: make(chan struct{}),
clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
advMaxStreams: s.maxConcurrentStreams(), advMaxStreams: s.maxConcurrentStreams(),
initialWindowSize: initialWindowSize, initialStreamSendWindowSize: initialWindowSize,
maxFrameSize: initialMaxFrameSize, maxFrameSize: initialMaxFrameSize,
headerTableSize: initialHeaderTableSize, headerTableSize: initialHeaderTableSize,
serveG: newGoroutineLock(), serveG: newGoroutineLock(),
pushEnabled: true, pushEnabled: true,
} }
s.state.registerConn(sc)
defer s.state.unregisterConn(sc)
// The net/http package sets the write deadline from the // The net/http package sets the write deadline from the
// http.Server.WriteTimeout during the TLS handshake, but then // http.Server.WriteTimeout during the TLS handshake, but then
// passes the connection off to us with the deadline already // passes the connection off to us with the deadline already set.
// set. Disarm it here so that it is not applied to additional // Write deadlines are set per stream in serverConn.newStream.
// streams opened on this connection. // Disarm the net.Conn write deadline here.
// TODO: implement WriteTimeout fully. See Issue 18437.
if sc.hs.WriteTimeout != 0 { if sc.hs.WriteTimeout != 0 {
sc.conn.SetWriteDeadline(time.Time{}) sc.conn.SetWriteDeadline(time.Time{})
} }
@ -294,6 +366,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
sc.writeSched = NewRandomWriteScheduler() sc.writeSched = NewRandomWriteScheduler()
} }
// These start at the RFC-specified defaults. If there is a higher
// configured value for inflow, that will be updated when we send a
// WINDOW_UPDATE shortly after sending SETTINGS.
sc.flow.add(initialWindowSize) sc.flow.add(initialWindowSize)
sc.inflow.add(initialWindowSize) sc.inflow.add(initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
@ -376,10 +451,9 @@ type serverConn struct {
doneServing chan struct{} // closed when serverConn.serve ends doneServing chan struct{} // closed when serverConn.serve ends
readFrameCh chan readFrameResult // written by serverConn.readFrames readFrameCh chan readFrameResult // written by serverConn.readFrames
wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
wantStartPushCh chan startPushRequest // from handlers -> serve
wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
bodyReadCh chan bodyReadMsg // from handlers -> serve bodyReadCh chan bodyReadMsg // from handlers -> serve
testHookCh chan func(int) // code to run on the serve loop serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop
flow flow // conn-wide (not stream-specific) outbound flow control flow flow // conn-wide (not stream-specific) outbound flow control
inflow flow // conn-wide inbound flow control inflow flow // conn-wide inbound flow control
tlsState *tls.ConnectionState // shared by all handlers, like net/http tlsState *tls.ConnectionState // shared by all handlers, like net/http
@ -399,7 +473,7 @@ type serverConn struct {
maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
streams map[uint32]*stream streams map[uint32]*stream
initialWindowSize int32 initialStreamSendWindowSize int32
maxFrameSize int32 maxFrameSize int32
headerTableSize uint32 headerTableSize uint32
peerMaxHeaderListSize uint32 // zero means unknown (default) peerMaxHeaderListSize uint32 // zero means unknown (default)
@ -411,14 +485,15 @@ type serverConn struct {
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
needToSendGoAway bool // we need to schedule a GOAWAY frame write needToSendGoAway bool // we need to schedule a GOAWAY frame write
goAwayCode ErrCode goAwayCode ErrCode
shutdownTimerCh <-chan time.Time // nil until used
shutdownTimer *time.Timer // nil until used shutdownTimer *time.Timer // nil until used
idleTimer *time.Timer // nil if unused idleTimer *time.Timer // nil if unused
idleTimerCh <-chan time.Time // nil if unused
// Owned by the writeFrameAsync goroutine: // Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer headerWriteBuf bytes.Buffer
hpackEncoder *hpack.Encoder hpackEncoder *hpack.Encoder
// Used by startGracefulShutdown.
shutdownOnce sync.Once
} }
func (sc *serverConn) maxHeaderListSize() uint32 { func (sc *serverConn) maxHeaderListSize() uint32 {
@ -466,7 +541,7 @@ type stream struct {
resetQueued bool // RST_STREAM queued for write; set by sc.resetStream resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
gotTrailerHeader bool // HEADER frame for trailers was seen gotTrailerHeader bool // HEADER frame for trailers was seen
wroteHeaders bool // whether we wrote headers (not status 100) wroteHeaders bool // whether we wrote headers (not status 100)
reqBuf []byte // if non-nil, body pipe buffer to return later at EOF writeDeadline *time.Timer // nil if unused
trailer http.Header // accumulated trailers trailer http.Header // accumulated trailers
reqTrailer http.Header // handler's Request.Trailer reqTrailer http.Header // handler's Request.Trailer
@ -696,15 +771,17 @@ func (sc *serverConn) serve() {
{SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, {SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
{SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxConcurrentStreams, sc.advMaxStreams},
{SettingMaxHeaderListSize, sc.maxHeaderListSize()}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()},
{SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
// TODO: more actual settings, notably
// SettingInitialWindowSize, but then we also
// want to bump up the conn window size the
// same amount here right after the settings
}, },
}) })
sc.unackedSettings++ sc.unackedSettings++
// Each connection starts with intialWindowSize inflow tokens.
// If a higher value is configured, we add more tokens.
if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
sc.sendWindowUpdate(nil, int(diff))
}
if err := sc.readPreface(); err != nil { if err := sc.readPreface(); err != nil {
sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
return return
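The server now advertises its configured per-stream window in SETTINGS and tops up the connection-level window with a WINDOW_UPDATE for anything beyond the RFC 7540 starting value of 65535 octets. A sketch of just that top-up arithmetic:

package main

import "fmt"

// RFC 7540 section 6.9.2: flow-control windows start at 65535 octets.
const initialWindowSize = 65535

// connWindowUpdate reports how many octets of connection-level WINDOW_UPDATE
// the server sends right after SETTINGS for a configured receive window.
func connWindowUpdate(configured int32) int32 {
	if diff := configured - initialWindowSize; diff > 0 {
		return diff
	}
	return 0
}

func main() {
	// With the 1 MiB default from initialConnRecvWindowSize, the client is
	// immediately granted 1048576 - 65535 = 983041 extra octets.
	fmt.Println(connWindowUpdate(1 << 20))
}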
@ -717,27 +794,25 @@ func (sc *serverConn) serve() {
sc.setConnState(http.StateIdle) sc.setConnState(http.StateIdle)
if sc.srv.IdleTimeout != 0 { if sc.srv.IdleTimeout != 0 {
sc.idleTimer = time.NewTimer(sc.srv.IdleTimeout) sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
defer sc.idleTimer.Stop() defer sc.idleTimer.Stop()
sc.idleTimerCh = sc.idleTimer.C
}
var gracefulShutdownCh <-chan struct{}
if sc.hs != nil {
gracefulShutdownCh = h1ServerShutdownChan(sc.hs)
} }
go sc.readFrames() // closed by defer sc.conn.Close above go sc.readFrames() // closed by defer sc.conn.Close above
settingsTimer := time.NewTimer(firstSettingsTimeout) settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
defer settingsTimer.Stop()
loopNum := 0 loopNum := 0
for { for {
loopNum++ loopNum++
select { select {
case wr := <-sc.wantWriteFrameCh: case wr := <-sc.wantWriteFrameCh:
if se, ok := wr.write.(StreamError); ok {
sc.resetStream(se)
break
}
sc.writeFrame(wr) sc.writeFrame(wr)
case spr := <-sc.wantStartPushCh:
sc.startPush(spr)
case res := <-sc.wroteFrameCh: case res := <-sc.wroteFrameCh:
sc.wroteFrame(res) sc.wroteFrame(res)
case res := <-sc.readFrameCh: case res := <-sc.readFrameCh:
@ -745,26 +820,37 @@ func (sc *serverConn) serve() {
return return
} }
res.readMore() res.readMore()
if settingsTimer.C != nil { if settingsTimer != nil {
settingsTimer.Stop() settingsTimer.Stop()
settingsTimer.C = nil settingsTimer = nil
} }
case m := <-sc.bodyReadCh: case m := <-sc.bodyReadCh:
sc.noteBodyRead(m.st, m.n) sc.noteBodyRead(m.st, m.n)
case <-settingsTimer.C: case msg := <-sc.serveMsgCh:
switch v := msg.(type) {
case func(int):
v(loopNum) // for testing
case *serverMessage:
switch v {
case settingsTimerMsg:
sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
return return
case <-gracefulShutdownCh: case idleTimerMsg:
gracefulShutdownCh = nil
sc.startGracefulShutdown()
case <-sc.shutdownTimerCh:
sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
return
case <-sc.idleTimerCh:
sc.vlogf("connection is idle") sc.vlogf("connection is idle")
sc.goAway(ErrCodeNo) sc.goAway(ErrCodeNo)
case fn := <-sc.testHookCh: case shutdownTimerMsg:
fn(loopNum) sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
return
case gracefulShutdownMsg:
sc.startGracefulShutdownInternal()
default:
panic("unknown timer")
}
case *startPushRequest:
sc.startPush(v)
default:
panic(fmt.Sprintf("unexpected type %T", v))
}
} }
if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame { if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame {
@ -773,6 +859,36 @@ func (sc *serverConn) serve() {
} }
} }
func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
select {
case <-sc.doneServing:
case <-sharedCh:
close(privateCh)
}
}
type serverMessage int
// Message values sent to serveMsgCh.
var (
settingsTimerMsg = new(serverMessage)
idleTimerMsg = new(serverMessage)
shutdownTimerMsg = new(serverMessage)
gracefulShutdownMsg = new(serverMessage)
)
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) }
func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
func (sc *serverConn) sendServeMsg(msg interface{}) {
sc.serveG.checkNotOn() // NOT
select {
case sc.serveMsgCh <- msg:
case <-sc.doneServing:
}
}
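All timers now fire on their own goroutines (time.AfterFunc) and funnel sentinel messages into serveMsgCh, so connection state is still touched only by the serve goroutine, and the doneServing case keeps a timer that fires after shutdown from blocking forever. A generic, self-contained sketch of that pattern (the loop, message, and send names here are hypothetical, not part of the package):

package main

import (
	"fmt"
	"time"
)

type message int

// Sentinel messages are compared by pointer, like settingsTimerMsg and
// friends above.
var tickMsg = new(message)

type loop struct {
	msgCh chan interface{}
	done  chan struct{}
}

// send runs on the timer's goroutine; the select mirrors sendServeMsg so a
// timer that fires after the loop has exited does not block forever.
func (l *loop) send(msg interface{}) {
	select {
	case l.msgCh <- msg:
	case <-l.done:
	}
}

func (l *loop) run() {
	timer := time.AfterFunc(50*time.Millisecond, func() { l.send(tickMsg) })
	defer timer.Stop()
	for msg := range l.msgCh {
		if msg == tickMsg {
			fmt.Println("timer fired; handled on the loop goroutine")
			close(l.done)
			return
		}
	}
}

func main() {
	l := &loop{msgCh: make(chan interface{}, 8), done: make(chan struct{})}
	l.run()
}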
// readPreface reads the ClientPreface greeting from the peer // readPreface reads the ClientPreface greeting from the peer
// or returns an error on timeout or an invalid greeting. // or returns an error on timeout or an invalid greeting.
func (sc *serverConn) readPreface() error { func (sc *serverConn) readPreface() error {
@ -1014,7 +1130,11 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
// stateClosed after the RST_STREAM frame is // stateClosed after the RST_STREAM frame is
// written. // written.
st.state = stateHalfClosedLocal st.state = stateHalfClosedLocal
sc.resetStream(streamError(st.id, ErrCodeCancel)) // Section 8.1: a server MAY request that the client abort
// transmission of a request without error by sending a
// RST_STREAM with an error code of NO_ERROR after sending
// a complete response.
sc.resetStream(streamError(st.id, ErrCodeNo))
case stateHalfClosedRemote: case stateHalfClosedRemote:
sc.closeStream(st, errHandlerComplete) sc.closeStream(st, errHandlerComplete)
} }
@ -1086,10 +1206,19 @@ func (sc *serverConn) scheduleFrameWrite() {
sc.inFrameScheduleLoop = false sc.inFrameScheduleLoop = false
} }
// startGracefulShutdown sends a GOAWAY with ErrCodeNo to tell the // startGracefulShutdown gracefully shuts down a connection. This
// client we're gracefully shutting down. The connection isn't closed // sends GOAWAY with ErrCodeNo to tell the client we're gracefully
// until all current streams are done. // shutting down. The connection isn't closed until all current
// streams are done.
//
// startGracefulShutdown returns immediately; it does not wait until
// the connection has shut down.
func (sc *serverConn) startGracefulShutdown() { func (sc *serverConn) startGracefulShutdown() {
sc.serveG.checkNotOn() // NOT
sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) })
}
func (sc *serverConn) startGracefulShutdownInternal() {
sc.goAwayIn(ErrCodeNo, 0) sc.goAwayIn(ErrCodeNo, 0)
} }
@ -1121,8 +1250,7 @@ func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) {
func (sc *serverConn) shutDownIn(d time.Duration) { func (sc *serverConn) shutDownIn(d time.Duration) {
sc.serveG.check() sc.serveG.check()
sc.shutdownTimer = time.NewTimer(d) sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
sc.shutdownTimerCh = sc.shutdownTimer.C
} }
func (sc *serverConn) resetStream(se StreamError) { func (sc *serverConn) resetStream(se StreamError) {
@ -1305,6 +1433,9 @@ func (sc *serverConn) closeStream(st *stream, err error) {
panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
} }
st.state = stateClosed st.state = stateClosed
if st.writeDeadline != nil {
st.writeDeadline.Stop()
}
if st.isPushed() { if st.isPushed() {
sc.curPushedStreams-- sc.curPushedStreams--
} else { } else {
@ -1317,7 +1448,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
sc.idleTimer.Reset(sc.srv.IdleTimeout) sc.idleTimer.Reset(sc.srv.IdleTimeout)
} }
if h1ServerKeepAlivesDisabled(sc.hs) { if h1ServerKeepAlivesDisabled(sc.hs) {
sc.startGracefulShutdown() sc.startGracefulShutdownInternal()
} }
} }
if p := st.body; p != nil { if p := st.body; p != nil {
@ -1395,9 +1526,9 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
// adjust the size of all stream flow control windows that it // adjust the size of all stream flow control windows that it
// maintains by the difference between the new value and the // maintains by the difference between the new value and the
// old value." // old value."
old := sc.initialWindowSize old := sc.initialStreamSendWindowSize
sc.initialWindowSize = int32(val) sc.initialStreamSendWindowSize = int32(val)
growth := sc.initialWindowSize - old // may be negative growth := int32(val) - old // may be negative
for _, st := range sc.streams { for _, st := range sc.streams {
if !st.flow.add(growth) { if !st.flow.add(growth) {
// 6.9.2 Initial Flow Control Window Size // 6.9.2 Initial Flow Control Window Size
@ -1504,7 +1635,7 @@ func (sc *serverConn) processGoAway(f *GoAwayFrame) error {
} else { } else {
sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
} }
sc.startGracefulShutdown() sc.startGracefulShutdownInternal()
// http://tools.ietf.org/html/rfc7540#section-6.8 // http://tools.ietf.org/html/rfc7540#section-6.8
// We should not create any new streams, which means we should disable push. // We should not create any new streams, which means we should disable push.
sc.pushEnabled = false sc.pushEnabled = false
@ -1543,6 +1674,12 @@ func (st *stream) copyTrailersToHandlerRequest() {
} }
} }
// onWriteTimeout is run on its own goroutine (from time.AfterFunc)
// when the stream's WriteTimeout has fired.
func (st *stream) onWriteTimeout() {
st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)})
}
func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
sc.serveG.check() sc.serveG.check()
id := f.StreamID id := f.StreamID
@ -1719,9 +1856,12 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
} }
st.cw.Init() st.cw.Init()
st.flow.conn = &sc.flow // link to conn-level counter st.flow.conn = &sc.flow // link to conn-level counter
st.flow.add(sc.initialWindowSize) st.flow.add(sc.initialStreamSendWindowSize)
st.inflow.conn = &sc.inflow // link to conn-level counter st.inflow.conn = &sc.inflow // link to conn-level counter
st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings st.inflow.add(sc.srv.initialStreamRecvWindowSize())
if sc.hs.WriteTimeout != 0 {
st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
sc.streams[id] = st sc.streams[id] = st
sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID})
@ -1785,16 +1925,14 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
return nil, nil, err return nil, nil, err
} }
if bodyOpen { if bodyOpen {
st.reqBuf = getRequestBodyBuf()
req.Body.(*requestBody).pipe = &pipe{
b: &fixedBuffer{buf: st.reqBuf},
}
if vv, ok := rp.header["Content-Length"]; ok { if vv, ok := rp.header["Content-Length"]; ok {
req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
} else { } else {
req.ContentLength = -1 req.ContentLength = -1
} }
req.Body.(*requestBody).pipe = &pipe{
b: &dataBuffer{expected: req.ContentLength},
}
} }
return rw, req, nil return rw, req, nil
} }
@ -1890,24 +2028,6 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
return rw, req, nil return rw, req, nil
} }
var reqBodyCache = make(chan []byte, 8)
func getRequestBodyBuf() []byte {
select {
case b := <-reqBodyCache:
return b
default:
return make([]byte, initialWindowSize)
}
}
func putRequestBodyBuf(b []byte) {
select {
case reqBodyCache <- b:
default:
}
}
// Run on its own goroutine. // Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
didPanic := true didPanic := true
@ -2003,12 +2123,6 @@ func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
case <-sc.doneServing: case <-sc.doneServing:
} }
} }
if err == io.EOF {
if buf := st.reqBuf; buf != nil {
st.reqBuf = nil // shouldn't matter; field unused by other
putRequestBodyBuf(buf)
}
}
} }
func (sc *serverConn) noteBodyRead(st *stream, n int) { func (sc *serverConn) noteBodyRead(st *stream, n int) {
@ -2138,6 +2252,7 @@ type responseWriterState struct {
wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
sentHeader bool // have we sent the header frame? sentHeader bool // have we sent the header frame?
handlerDone bool // handler has finished handlerDone bool // handler has finished
dirty bool // a Write failed; don't reuse this responseWriterState
sentContentLen int64 // non-zero if handler set a Content-Length header sentContentLen int64 // non-zero if handler set a Content-Length header
wroteBytes int64 wroteBytes int64
@ -2219,6 +2334,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
date: date, date: date,
}) })
if err != nil { if err != nil {
rws.dirty = true
return 0, err return 0, err
} }
if endStream { if endStream {
@ -2240,6 +2356,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
if len(p) > 0 || endStream { if len(p) > 0 || endStream {
// only send a 0 byte DATA frame if we're ending the stream. // only send a 0 byte DATA frame if we're ending the stream.
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
rws.dirty = true
return 0, err return 0, err
} }
} }
@ -2251,6 +2368,9 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
trailers: rws.trailers, trailers: rws.trailers,
endStream: true, endStream: true,
}) })
if err != nil {
rws.dirty = true
}
return len(p), err return len(p), err
} }
return len(p), nil return len(p), nil
@ -2390,7 +2510,7 @@ func cloneHeader(h http.Header) http.Header {
// //
// * Handler calls w.Write or w.WriteString -> // * Handler calls w.Write or w.WriteString ->
// * -> rws.bw (*bufio.Writer) -> // * -> rws.bw (*bufio.Writer) ->
// * (Handler migth call Flush) // * (Handler might call Flush)
// * -> chunkWriter{rws} // * -> chunkWriter{rws}
// * -> responseWriterState.writeChunk(p []byte) // * -> responseWriterState.writeChunk(p []byte)
// * -> responseWriterState.writeChunk (most of the magic; see comment there) // * -> responseWriterState.writeChunk (most of the magic; see comment there)
@ -2429,10 +2549,19 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int,
func (w *responseWriter) handlerDone() { func (w *responseWriter) handlerDone() {
rws := w.rws rws := w.rws
dirty := rws.dirty
rws.handlerDone = true rws.handlerDone = true
w.Flush() w.Flush()
w.rws = nil w.rws = nil
if !dirty {
// Only recycle the pool if all prior Write calls to
// the serverConn goroutine completed successfully. If
// they returned earlier due to resets from the peer
// there might still be write goroutines outstanding
// from the serverConn referencing the rws memory. See
// issue 20704.
responseWriterStatePool.Put(rws) responseWriterStatePool.Put(rws)
}
} }
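The dirty flag keeps a responseWriterState whose Write failed out of the sync.Pool, since a write goroutine spawned by the serverConn may still reference it (issue 20704). A generic sketch of that guard, with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

type state struct {
	buf   []byte
	dirty bool // a write failed; another goroutine may still hold this state
}

var statePool = sync.Pool{New: func() interface{} { return new(state) }}

// release mirrors handlerDone above: recycle only when every write completed;
// otherwise let the garbage collector reclaim the state once all references
// to it are gone.
func release(st *state) {
	if st.dirty {
		return
	}
	st.buf = st.buf[:0]
	statePool.Put(st)
}

func main() {
	st := statePool.Get().(*state)
	st.buf = append(st.buf, "response body"...)
	release(st)
	fmt.Println("clean states are recycled; dirty ones are dropped")
}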
// Push errors. // Push errors.
@ -2514,7 +2643,7 @@ func (w *responseWriter) push(target string, opts pushOptions) error {
return fmt.Errorf("method %q must be GET or HEAD", opts.Method) return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
} }
msg := startPushRequest{ msg := &startPushRequest{
parent: st, parent: st,
method: opts.Method, method: opts.Method,
url: u, url: u,
@ -2527,7 +2656,7 @@ func (w *responseWriter) push(target string, opts pushOptions) error {
return errClientDisconnected return errClientDisconnected
case <-st.cw: case <-st.cw:
return errStreamClosed return errStreamClosed
case sc.wantStartPushCh <- msg: case sc.serveMsgCh <- msg:
} }
select { select {
@ -2549,7 +2678,7 @@ type startPushRequest struct {
done chan error done chan error
} }
func (sc *serverConn) startPush(msg startPushRequest) { func (sc *serverConn) startPush(msg *startPushRequest) {
sc.serveG.check() sc.serveG.check()
// http://tools.ietf.org/html/rfc7540#section-6.6. // http://tools.ietf.org/html/rfc7540#section-6.6.
@ -2588,7 +2717,7 @@ func (sc *serverConn) startPush(msg startPushRequest) {
// A server that is unable to establish a new stream identifier can send a GOAWAY // A server that is unable to establish a new stream identifier can send a GOAWAY
// frame so that the client is forced to open a new connection for new streams. // frame so that the client is forced to open a new connection for new streams.
if sc.maxPushPromiseID+2 >= 1<<31 { if sc.maxPushPromiseID+2 >= 1<<31 {
sc.startGracefulShutdown() sc.startGracefulShutdownInternal()
return 0, ErrPushLimitReached return 0, ErrPushLimitReached
} }
sc.maxPushPromiseID += 2 sc.maxPushPromiseID += 2
@ -2713,31 +2842,6 @@ var badTrailer = map[string]bool{
"Www-Authenticate": true, "Www-Authenticate": true,
} }
// h1ServerShutdownChan returns a channel that will be closed when the
// provided *http.Server wants to shut down.
//
// This is a somewhat hacky way to get at http1 innards. It works
// when the http2 code is bundled into the net/http package in the
// standard library. The alternatives ended up making the cmd/go tool
// depend on http Servers. This is the lightest option for now.
// This is tested via the TestServeShutdown* tests in net/http.
func h1ServerShutdownChan(hs *http.Server) <-chan struct{} {
if fn := testh1ServerShutdownChan; fn != nil {
return fn(hs)
}
var x interface{} = hs
type I interface {
getDoneChan() <-chan struct{}
}
if hs, ok := x.(I); ok {
return hs.getDoneChan()
}
return nil
}
// optional test hook for h1ServerShutdownChan.
var testh1ServerShutdownChan func(hs *http.Server) <-chan struct{}
// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives // h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
// disabled. See comments on h1ServerShutdownChan above for why // disabled. See comments on h1ServerShutdownChan above for why
// the code is written this way. // the code is written this way.
View File
@ -18,6 +18,7 @@ import (
"io/ioutil" "io/ioutil"
"log" "log"
"math" "math"
mathrand "math/rand"
"net" "net"
"net/http" "net/http"
"sort" "sort"
@ -164,6 +165,7 @@ type ClientConn struct {
goAwayDebug string // goAway frame's debug data, retained as a string goAwayDebug string // goAway frame's debug data, retained as a string
streams map[uint32]*clientStream // client-initiated streams map[uint32]*clientStream // client-initiated
nextStreamID uint32 nextStreamID uint32
pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
pings map[[8]byte]chan struct{} // in flight ping data to notification channel pings map[[8]byte]chan struct{} // in flight ping data to notification channel
bw *bufio.Writer bw *bufio.Writer
br *bufio.Reader br *bufio.Reader
@ -216,35 +218,45 @@ type clientStream struct {
resTrailer *http.Header // client's Response.Trailer resTrailer *http.Header // client's Response.Trailer
} }
// awaitRequestCancel runs in its own goroutine and waits for the user // awaitRequestCancel waits for the user to cancel a request or for the done
// to cancel a RoundTrip request, its context to expire, or for the // channel to be signaled. A non-nil error is returned only if the request was
// request to be done (any way it might be removed from the cc.streams // canceled.
// map: peer reset, successful completion, TCP connection breakage, func awaitRequestCancel(req *http.Request, done <-chan struct{}) error {
// etc)
func (cs *clientStream) awaitRequestCancel(req *http.Request) {
ctx := reqContext(req) ctx := reqContext(req)
if req.Cancel == nil && ctx.Done() == nil { if req.Cancel == nil && ctx.Done() == nil {
return return nil
} }
select { select {
case <-req.Cancel: case <-req.Cancel:
cs.cancelStream() return errRequestCanceled
cs.bufPipe.CloseWithError(errRequestCanceled)
case <-ctx.Done(): case <-ctx.Done():
return ctx.Err()
case <-done:
return nil
}
}
// awaitRequestCancel waits for the user to cancel a request, its context to
// expire, or for the request to be done (any way it might be removed from the
// cc.streams map: peer reset, successful completion, TCP connection breakage,
// etc). If the request is canceled, then cs will be canceled and closed.
func (cs *clientStream) awaitRequestCancel(req *http.Request) {
if err := awaitRequestCancel(req, cs.done); err != nil {
cs.cancelStream() cs.cancelStream()
cs.bufPipe.CloseWithError(ctx.Err()) cs.bufPipe.CloseWithError(err)
case <-cs.done:
} }
} }
func (cs *clientStream) cancelStream() { func (cs *clientStream) cancelStream() {
cs.cc.mu.Lock() cc := cs.cc
cc.mu.Lock()
didReset := cs.didReset didReset := cs.didReset
cs.didReset = true cs.didReset = true
cs.cc.mu.Unlock() cc.mu.Unlock()
if !didReset { if !didReset {
cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
cc.forgetStreamID(cs.ID)
} }
} }
@ -329,7 +341,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
	}

	addr := authorityAddr(req.URL.Scheme, req.URL.Host)
-	for {
+	for retry := 0; ; retry++ {
		cc, err := t.connPool().GetClientConn(req, addr)
		if err != nil {
			t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
@ -337,10 +349,26 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
		}
		traceGotConn(req, cc)
		res, err := cc.RoundTrip(req)
-		if err != nil {
-			if req, err = shouldRetryRequest(req, err); err == nil {
+		if err != nil && retry <= 6 {
+			afterBodyWrite := false
+			if e, ok := err.(afterReqBodyWriteError); ok {
+				err = e
+				afterBodyWrite = true
+			}
+			if req, err = shouldRetryRequest(req, err, afterBodyWrite); err == nil {
+				// After the first retry, do exponential backoff with 10% jitter.
+				if retry == 0 {
					continue
				}
+				backoff := float64(uint(1) << (uint(retry) - 1))
+				backoff += backoff * (0.1 * mathrand.Float64())
+				select {
+				case <-time.After(time.Second * time.Duration(backoff)):
+					continue
+				case <-reqContext(req).Done():
+					return nil, reqContext(req).Err()
+				}
+			}
		}
		if err != nil {
			t.vlogf("RoundTrip failure: %v", err)
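For readers skimming the hunk above: the retry loop gives up after six retries and, starting with the second retry, sleeps for an exponentially growing interval padded with up to 10% jitter. The snippet below is a standalone sketch of that delay calculation, not part of the vendored code; it assumes only the standard library and mirrors the arithmetic shown above.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoffDelay reproduces the delay shape used by the retry loop above:
// no sleep before the first retry, then 1s, 2s, 4s, ... doubling per attempt,
// each padded with up to 10% random jitter (the fractional part is dropped
// when converting to whole seconds, as in the vendored expression).
func backoffDelay(retry int) time.Duration {
	if retry == 0 {
		return 0
	}
	backoff := float64(uint(1) << (uint(retry) - 1))
	backoff += backoff * (0.1 * rand.Float64())
	return time.Second * time.Duration(backoff)
}

func main() {
	for retry := 0; retry <= 6; retry++ {
		fmt.Printf("retry %d: wait %v\n", retry, backoffDelay(retry))
	}
}
```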
@ -362,22 +390,30 @@ func (t *Transport) CloseIdleConnections() {
var (
	errClientConnClosed    = errors.New("http2: client conn is closed")
	errClientConnUnusable  = errors.New("http2: client conn not usable")
	errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
-	errClientConnGotGoAwayAfterSomeReqBody = errors.New("http2: Transport received Server's graceful shutdown GOAWAY; some request body already written")
)

+// afterReqBodyWriteError is a wrapper around errors returned by ClientConn.RoundTrip.
+// It is used to signal that err happened after part of Request.Body was sent to the server.
+type afterReqBodyWriteError struct {
+	err error
+}
+
+func (e afterReqBodyWriteError) Error() string {
+	return e.err.Error() + "; some request body already written"
+}
+
// shouldRetryRequest is called by RoundTrip when a request fails to get
// response headers. It is always called with a non-nil error.
// It returns either a request to retry (either the same request, or a
// modified clone), or an error if the request can't be replayed.
-func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) {
-	switch err {
-	default:
+func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) {
+	if !canRetryError(err) {
		return nil, err
-	case errClientConnUnusable, errClientConnGotGoAway:
+	}
+	if !afterBodyWrite {
		return req, nil
-	case errClientConnGotGoAwayAfterSomeReqBody:
+	}
	// If the Body is nil (or http.NoBody), it's safe to reuse
	// this request and its Body.
	if req.Body == nil || reqBodyIsNoBody(req.Body) {
@ -387,7 +423,7 @@ func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) {
	// func defined.
	getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody
	if getBody == nil {
-		return nil, errors.New("http2: Transport: peer server initiated graceful shutdown after some of Request.Body was written; define Request.GetBody to avoid this error")
+		return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err)
	}
	body, err := getBody()
	if err != nil {
@ -396,7 +432,16 @@ func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) {
	newReq := *req
	newReq.Body = body
	return &newReq, nil
+}
+
+func canRetryError(err error) bool {
+	if err == errClientConnUnusable || err == errClientConnGotGoAway {
+		return true
+	}
+	if se, ok := err.(StreamError); ok {
+		return se.Code == ErrCodeRefusedStream
+	}
+	return false
}

func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) {
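The retry path above can only replay a request whose body was already partially written if it can obtain a fresh copy of that body; on Go 1.8 and later that is what Request.GetBody provides. Below is a minimal illustrative sketch, not taken from the vendored code; the URL, payload, and helper name are placeholders.

```go
package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"net/http"
)

// newReplayableRequest builds a POST request whose body can be re-created,
// which is the hook the transport needs when it retries after part of the
// body has already been sent.
func newReplayableRequest(url string, payload []byte) (*http.Request, error) {
	req, err := http.NewRequest("POST", url, bytes.NewReader(payload))
	if err != nil {
		return nil, err
	}
	// http.NewRequest already sets GetBody for *bytes.Reader bodies on Go 1.8+,
	// but spelling it out makes the dependency of the retry logic explicit.
	req.GetBody = func() (io.ReadCloser, error) {
		return ioutil.NopCloser(bytes.NewReader(payload)), nil
	}
	return req, nil
}

func main() {
	req, err := newReplayableRequest("https://example.com/upload", []byte("hello"))
	if err != nil {
		panic(err)
	}
	_ = req // hand the request to an HTTP/2-capable http.Client in real use
}
```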
@ -560,6 +605,8 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
	}
}

+// CanTakeNewRequest reports whether the connection can take a new request,
+// meaning it has not been closed or received or sent a GOAWAY.
func (cc *ClientConn) CanTakeNewRequest() bool {
	cc.mu.Lock()
	defer cc.mu.Unlock()
@ -571,8 +618,7 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool {
		return false
	}
	return cc.goAway == nil && !cc.closed &&
-		int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
-		cc.nextStreamID < math.MaxInt32
+		int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32
}

// onIdleTimeout is called from a time.AfterFunc goroutine. It will
@ -694,7 +740,7 @@ func checkConnHeaders(req *http.Request) error {
// req.ContentLength, where 0 actually means zero (not unknown) and -1
// means unknown.
func actualContentLength(req *http.Request) int64 {
-	if req.Body == nil {
+	if req.Body == nil || reqBodyIsNoBody(req.Body) {
		return 0
	}
	if req.ContentLength != 0 {
@ -718,15 +764,14 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
	hasTrailers := trailers != ""

	cc.mu.Lock()
-	cc.lastActive = time.Now()
-	if cc.closed || !cc.canTakeNewRequestLocked() {
+	if err := cc.awaitOpenSlotForRequest(req); err != nil {
		cc.mu.Unlock()
-		return nil, errClientConnUnusable
+		return nil, err
	}

	body := req.Body
-	hasBody := body != nil
	contentLen := actualContentLength(req)
+	hasBody := contentLen != 0

	// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
	var requestedGzip bool
@ -816,14 +861,13 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
			cs.abortRequestBodyWrite(errStopReqBodyWrite)
		}
		if re.err != nil {
-			if re.err == errClientConnGotGoAway {
			cc.mu.Lock()
-			if cs.startedWrite {
-				re.err = errClientConnGotGoAwayAfterSomeReqBody
-			}
+			afterBodyWrite := cs.startedWrite
			cc.mu.Unlock()
-			}
			cc.forgetStreamID(cs.ID)
+			if afterBodyWrite {
+				return nil, afterReqBodyWriteError{re.err}
+			}
			return nil, re.err
		}
		res.Request = req
@ -836,31 +880,31 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
		case re := <-readLoopResCh:
			return handleReadLoopResponse(re)
		case <-respHeaderTimer:
+			cc.forgetStreamID(cs.ID)
			if !hasBody || bodyWritten {
				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
			} else {
				bodyWriter.cancel()
				cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
			}
-			cc.forgetStreamID(cs.ID)
			return nil, errTimeout
		case <-ctx.Done():
+			cc.forgetStreamID(cs.ID)
			if !hasBody || bodyWritten {
				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
			} else {
				bodyWriter.cancel()
				cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
			}
-			cc.forgetStreamID(cs.ID)
			return nil, ctx.Err()
		case <-req.Cancel:
+			cc.forgetStreamID(cs.ID)
			if !hasBody || bodyWritten {
				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
			} else {
				bodyWriter.cancel()
				cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
			}
-			cc.forgetStreamID(cs.ID)
			return nil, errRequestCanceled
		case <-cs.peerReset:
			// processResetStream already removed the
@ -887,6 +931,45 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
	}
}
// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams.
// Must hold cc.mu.
func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error {
var waitingForConn chan struct{}
var waitingForConnErr error // guarded by cc.mu
for {
cc.lastActive = time.Now()
if cc.closed || !cc.canTakeNewRequestLocked() {
return errClientConnUnusable
}
if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) {
if waitingForConn != nil {
close(waitingForConn)
}
return nil
}
// Unfortunately, we cannot wait on a condition variable and channel at
// the same time, so instead, we spin up a goroutine to check if the
// request is canceled while we wait for a slot to open in the connection.
if waitingForConn == nil {
waitingForConn = make(chan struct{})
go func() {
if err := awaitRequestCancel(req, waitingForConn); err != nil {
cc.mu.Lock()
waitingForConnErr = err
cc.cond.Broadcast()
cc.mu.Unlock()
}
}()
}
cc.pendingRequests++
cc.cond.Wait()
cc.pendingRequests--
if waitingForConnErr != nil {
return waitingForConnErr
}
}
}
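The comment inside awaitOpenSlotForRequest points at a general Go constraint: a single goroutine cannot simultaneously select on a channel and wait on a sync.Cond, so cancellation has to be bridged into the condition variable by a helper goroutine that records the error under the lock and broadcasts. The following is a self-contained sketch of that pattern with hypothetical names (waitForSlot, slots); it is independent of the vendored types and only illustrates the idiom.

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// waitForSlot blocks until *slots > 0 or ctx is canceled. Cancellation is
// bridged into the sync.Cond by a helper goroutine that sets ctxErr under the
// mutex and broadcasts, mirroring the approach used above.
func waitForSlot(ctx context.Context, mu *sync.Mutex, cond *sync.Cond, slots *int) error {
	var ctxErr error // guarded by mu
	done := make(chan struct{})
	defer close(done)

	go func() {
		select {
		case <-ctx.Done():
			mu.Lock()
			ctxErr = ctx.Err()
			cond.Broadcast()
			mu.Unlock()
		case <-done:
		}
	}()

	mu.Lock()
	defer mu.Unlock()
	for *slots == 0 && ctxErr == nil {
		cond.Wait()
	}
	if ctxErr != nil {
		return ctxErr
	}
	*slots--
	return nil
}

func main() {
	var mu sync.Mutex
	cond := sync.NewCond(&mu)
	slots := 0

	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	// No slot ever becomes free, so the wait ends with context.DeadlineExceeded.
	fmt.Println(waitForSlot(ctx, &mu, cond, &slots))
}
```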
// requires cc.wmu be held
func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error {
	first := true // first frame written (HEADERS is first, then CONTINUATION)
@ -1246,7 +1329,9 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
			cc.idleTimer.Reset(cc.idleTimeout)
		}
		close(cs.done)
-		cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
+		// Wake up checkResetOrDone via clientStream.awaitFlowControl and
+		// wake up RoundTrip if there is a pending request.
+		cc.cond.Broadcast()
	}
	return cs
}
@ -1345,8 +1430,9 @@ func (rl *clientConnReadLoop) run() error {
			cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
		}
		if se, ok := err.(StreamError); ok {
-			if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil {
+			if cs := cc.streamByID(se.StreamID, false); cs != nil {
				cs.cc.writeStreamReset(cs.ID, se.Code, err)
+				cs.cc.forgetStreamID(cs.ID)
				if se.Cause == nil {
					se.Cause = cc.fr.errDetail
				}
@ -1528,8 +1614,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
		return res, nil
	}

-	buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage
-	cs.bufPipe = pipe{b: buf}
+	cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}}
	cs.bytesRemain = res.ContentLength
	res.Body = transportResponseBody{cs}
	go cs.awaitRequestCancel(cs.req)
@ -1656,6 +1741,7 @@ func (b transportResponseBody) Close() error {
	cc.wmu.Lock()
	if !serverSentStreamEnd {
		cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
+		cs.didReset = true
	}
	// Return connection-level flow control.
	if unread > 0 {
@ -1668,6 +1754,7 @@ func (b transportResponseBody) Close() error {
	}

	cs.bufPipe.BreakWithError(errClosedResponseBody)
+	cc.forgetStreamID(cs.ID)
	return nil
}
@ -1703,12 +1790,6 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
		return nil
	}
	if f.Length > 0 {
-		if len(data) > 0 && cs.bufPipe.b == nil {
-			// Data frame after it's already closed?
-			cc.logf("http2: Transport received DATA frame for closed stream; closing connection")
-			return ConnectionError(ErrCodeProtocol)
-		}
		// Check connection-level flow control.
		cc.mu.Lock()
		if cs.inflow.available() >= int32(f.Length) {
@ -1719,16 +1800,27 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
		}
		// Return any padded flow control now, since we won't
		// refund it later on body reads.
-		if pad := int32(f.Length) - int32(len(data)); pad > 0 {
-			cs.inflow.add(pad)
-			cc.inflow.add(pad)
+		var refund int
+		if pad := int(f.Length) - len(data); pad > 0 {
+			refund += pad
+		}
+		// Return len(data) now if the stream is already closed,
+		// since data will never be read.
+		didReset := cs.didReset
+		if didReset {
+			refund += len(data)
+		}
+		if refund > 0 {
+			cc.inflow.add(int32(refund))
			cc.wmu.Lock()
-			cc.fr.WriteWindowUpdate(0, uint32(pad))
-			cc.fr.WriteWindowUpdate(cs.ID, uint32(pad))
+			cc.fr.WriteWindowUpdate(0, uint32(refund))
+			if !didReset {
+				cs.inflow.add(int32(refund))
+				cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
+			}
			cc.bw.Flush()
			cc.wmu.Unlock()
		}
-		didReset := cs.didReset
		cc.mu.Unlock()

		if len(data) > 0 && !didReset {


@ -53,7 +53,7 @@ type PriorityWriteSchedulerConfig struct {
}

// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
-// frames by following HTTP/2 priorities as described in RFC 7340 Section 5.3.
+// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
// If cfg is nil, default options are used.
func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
	if cfg == nil {

202
vendor/google.golang.org/genproto/LICENSE generated vendored Normal file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -0,0 +1,143 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/rpc/status.proto
/*
Package status is a generated protocol buffer package.
It is generated from these files:
google/rpc/status.proto
It has these top-level messages:
Status
*/
package status
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf "github.com/golang/protobuf/ptypes/any"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// The `Status` type defines a logical error model that is suitable for different
// programming environments, including REST APIs and RPC APIs. It is used by
// [gRPC](https://github.com/grpc). The error model is designed to be:
//
// - Simple to use and understand for most users
// - Flexible enough to meet unexpected needs
//
// # Overview
//
// The `Status` message contains three pieces of data: error code, error message,
// and error details. The error code should be an enum value of
// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The
// error message should be a developer-facing English message that helps
// developers *understand* and *resolve* the error. If a localized user-facing
// error message is needed, put the localized message in the error details or
// localize it in the client. The optional error details may contain arbitrary
// information about the error. There is a predefined set of error detail types
// in the package `google.rpc` which can be used for common error conditions.
//
// # Language mapping
//
// The `Status` message is the logical representation of the error model, but it
// is not necessarily the actual wire format. When the `Status` message is
// exposed in different client libraries and different wire protocols, it can be
// mapped differently. For example, it will likely be mapped to some exceptions
// in Java, but more likely mapped to some error codes in C.
//
// # Other uses
//
// The error model and the `Status` message can be used in a variety of
// environments, either with or without APIs, to provide a
// consistent developer experience across different environments.
//
// Example uses of this error model include:
//
// - Partial errors. If a service needs to return partial errors to the client,
// it may embed the `Status` in the normal response to indicate the partial
// errors.
//
// - Workflow errors. A typical workflow has multiple steps. Each step may
// have a `Status` message for error reporting purpose.
//
// - Batch operations. If a client uses batch request and batch response, the
// `Status` message should be used directly inside batch response, one for
// each error sub-response.
//
// - Asynchronous operations. If an API call embeds asynchronous operation
// results in its response, the status of those operations should be
// represented directly using the `Status` message.
//
// - Logging. If some API errors are stored in logs, the message `Status` could
// be used directly after any stripping needed for security/privacy reasons.
type Status struct {
// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"`
// A developer-facing error message, which should be in English. Any
// user-facing error message should be localized and sent in the
// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"`
// A list of messages that carry the error details. There will be a
// common set of message types for APIs to use.
Details []*google_protobuf.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"`
}
func (m *Status) Reset() { *m = Status{} }
func (m *Status) String() string { return proto.CompactTextString(m) }
func (*Status) ProtoMessage() {}
func (*Status) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Status) GetCode() int32 {
if m != nil {
return m.Code
}
return 0
}
func (m *Status) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *Status) GetDetails() []*google_protobuf.Any {
if m != nil {
return m.Details
}
return nil
}
func init() {
proto.RegisterType((*Status)(nil), "google.rpc.Status")
}
func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 209 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28,
0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81,
0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1,
0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83,
0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05,
0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7,
0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7,
0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c,
0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12,
0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12,
0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1,
0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00,
0x00,
}
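As a concrete, purely illustrative sketch of how the generated Status message above can be populated, the snippet below wraps an arbitrary proto message as a google.protobuf.Any and attaches it as a detail. It assumes the generated package is vendored at the conventional google.golang.org/genproto/googleapis/rpc/status import path, and it uses a Duration message only as a stand-in detail type; neither assumption comes from the diff itself.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	anypb "github.com/golang/protobuf/ptypes/any"
	durpb "github.com/golang/protobuf/ptypes/duration"
	spb "google.golang.org/genproto/googleapis/rpc/status"
)

func main() {
	// Wrap a typed detail (here a retry delay) as google.protobuf.Any so it
	// can travel in Status.Details alongside the code and message.
	detail, err := ptypes.MarshalAny(&durpb.Duration{Seconds: 30})
	if err != nil {
		panic(err)
	}

	st := &spb.Status{
		Code:    14, // numeric value of google.rpc.Code UNAVAILABLE
		Message: "backend temporarily unavailable",
		Details: []*anypb.Any{detail},
	}
	fmt.Println(st.String())
}
```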

1
vendor/google.golang.org/grpc/AUTHORS generated vendored Normal file

@ -0,0 +1 @@
Google Inc.


@ -1,46 +1,32 @@
# How to contribute

-We definitely welcome patches and contribution to grpc! Here are some guidelines
-and information about how to do so.
+We definitely welcome your patches and contributions to gRPC!

-## Sending patches
-
-### Getting started
-
-1. Check out the code:
-
-       $ go get google.golang.org/grpc
-       $ cd $GOPATH/src/google.golang.org/grpc
-
-1. Create a fork of the grpc-go repository.
-1. Add your fork as a remote:
-
-       $ git remote add fork git@github.com:$YOURGITHUBUSERNAME/grpc-go.git
-
-1. Make changes, commit them.
-1. Run the test suite:
-
-       $ make test
-
-1. Push your changes to your fork:
-
-       $ git push fork ...
-
-1. Open a pull request.
+If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)

## Legal requirements

In order to protect both you and ourselves, you will need to sign the
[Contributor License Agreement](https://cla.developers.google.com/clas).

-## Filing Issues
-When filing an issue, make sure to answer these five questions:
-
-1. What version of Go are you using (`go version`)?
-2. What operating system and processor architecture are you using?
-3. What did you do?
-4. What did you expect to see?
-5. What did you see instead?
-
-### Contributing code
-Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file.
+## Guidelines for Pull Requests
+How to get your contributions merged smoothly and quickly.
+
+- Create **small PRs** that are narrowly focused on **addressing a single concern**. We often times receive PRs that are trying to fix several things at a time, but only one fix is considered acceptable, nothing gets merged and both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy.
+
+- For speculative changes, consider opening an issue and discussing it first. If you are suggesting a behavioral or API change, consider starting with a [gRFC proposal](https://github.com/grpc/proposal).
+
+- Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists.
+
+- Don't fix code style and formatting unless you are already changing that line to address an issue. PRs with irrelevant changes won't be merged. If you do want to fix formatting or style, do that in a separate PR.
+
+- Unless your PR is trivial, you should expect there will be reviewer comments that you'll need to address before merging. We expect you to be reasonably responsive to those comments, otherwise the PR will be closed after 2-3 weeks of inactivity.
+
+- Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use `rebase -i upstream/master` to curate your commit history and/or to bring in latest changes from master (but avoid rebasing in the middle of a code review).
+
+- Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change).
+
+- **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on.
+
+- Exceptions to the rules can be made if there's a compelling reason for doing so.

224
vendor/google.golang.org/grpc/LICENSE generated vendored

@ -1,28 +1,202 @@
-Copyright 2014, Google Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-    * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -1,22 +0,0 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the gRPC project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of gRPC, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of gRPC. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of gRPC or any code incorporated within this
implementation of gRPC constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of gRPC
shall terminate as of the date such litigation is filed.


@ -1,8 +1,8 @@
-#gRPC-Go
+# gRPC-Go

[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)

-The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide.
+The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide.

Installation
------------
@ -16,23 +16,7 @@ $ go get google.golang.org/grpc
Prerequisites
-------------

-This requires Go 1.5 or later.
-
-A note on the version used: significant performance improvements in benchmarks
-of grpc-go have been seen by upgrading the go version from 1.5 to the latest
-1.7.1.
-
-From https://golang.org/doc/install, one way to install the latest version of go is:
-
-```
-$ GO_VERSION=1.7.1
-$ OS=linux
-$ ARCH=amd64
-$ curl -O https://storage.googleapis.com/golang/go${GO_VERSION}.${OS}-${ARCH}.tar.gz
-$ sudo tar -C /usr/local -xzf go$GO_VERSION.$OS-$ARCH.tar.gz
-$ # Put go on the PATH, keep the usual installation dir
-$ sudo ln -s /usr/local/go/bin/go /usr/bin/go
-$ rm go$GO_VERSION.$OS-$ARCH.tar.gz
-```
+This requires Go 1.6 or later.

Constraints
-----------
@ -42,9 +26,13 @@ Documentation
-------------

See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/).

+Performance
+-----------
+
+See the current benchmarks for some of the languages supported in [this dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696).
+
Status
------

-GA
+General Availability [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages).

FAQ
---


@ -1,3 +1,21 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc

import (


@ -1,33 +1,18 @@
/*
 *
- * Copyright 2016, Google Inc.
- * All rights reserved.
+ * Copyright 2016 gRPC authors.
 *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
 *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- *       copyright notice, this list of conditions and the following disclaimer
- *       in the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Google Inc. nor the names of its
- *       contributors may be used to endorse or promote products derived from
- *       this software without specific prior written permission.
+ *     http://www.apache.org/licenses/LICENSE-2.0
 *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
 *
 */
@ -35,6 +20,7 @@ package grpc
import (
	"fmt"
+	"net"
	"sync"

	"golang.org/x/net/context"
@ -60,6 +46,10 @@ type BalancerConfig struct {
	// use to dial to a remote load balancer server. The Balancer implementations
	// can ignore this if it does not need to talk to another party securely.
	DialCreds credentials.TransportCredentials
+	// Dialer is the custom dialer the Balancer implementation can use to dial
+	// to a remote load balancer server. The Balancer implementations
+	// can ignore this if it doesn't need to talk to remote balancer.
+	Dialer func(context.Context, string) (net.Conn, error)
}

// BalancerGetOptions configures a Get call.
@ -167,7 +157,7 @@ type roundRobin struct {
func (rr *roundRobin) watchAddrUpdates() error {
	updates, err := rr.w.Next()
	if err != nil {
-		grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err)
+		grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err)
		return err
	}
	rr.mu.Lock()
@ -183,7 +173,7 @@ func (rr *roundRobin) watchAddrUpdates() error {
			for _, v := range rr.addrs {
				if addr == v.addr {
					exist = true
-					grpclog.Println("grpc: The name resolver wanted to add an existing address: ", addr)
+					grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr)
					break
				}
			}
@ -200,7 +190,7 @@ func (rr *roundRobin) watchAddrUpdates() error {
				}
			}
		default:
-			grpclog.Println("Unknown update.Op ", update.Op)
+			grpclog.Errorln("Unknown update.Op ", update.Op)
		}
	}
	// Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified.
@ -211,6 +201,10 @@ func (rr *roundRobin) watchAddrUpdates() error {
	if rr.done {
		return ErrClientConnClosing
	}
+	select {
+	case <-rr.addrCh:
+	default:
+	}
	rr.addrCh <- open
	return nil
}
@ -233,7 +227,7 @@ func (rr *roundRobin) Start(target string, config BalancerConfig) error {
		return err
	}
	rr.w = w
-	rr.addrCh = make(chan []Address)
+	rr.addrCh = make(chan []Address, 1)
	go func() {
		for {
			if err := rr.watchAddrUpdates(); err != nil {
@ -385,6 +379,9 @@ func (rr *roundRobin) Notify() <-chan []Address {
func (rr *roundRobin) Close() error {
	rr.mu.Lock()
	defer rr.mu.Unlock()
+	if rr.done {
+		return errBalancerClosed
+	}
	rr.done = true
	if rr.w != nil {
		rr.w.Close()
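One detail worth calling out in the hunks above: addrCh becomes a buffered channel of size one, and the writer drains any stale value before sending, so the consumer always observes the newest address list without the writer blocking. Below is a tiny standalone sketch of that drain-then-send idiom (assuming a single sender), using strings in place of []Address; it is an illustration, not code from the vendored package.

```go
package main

import "fmt"

// publishLatest discards any stale value sitting in the size-1 channel before
// sending, so a slow receiver always gets the most recent update. With a
// single sender this never blocks.
func publishLatest(ch chan string, v string) {
	select {
	case <-ch: // drop the stale value, if any
	default:
	}
	ch <- v
}

func main() {
	updates := make(chan string, 1)
	publishLatest(updates, "first")
	publishLatest(updates, "second") // replaces "first" without blocking
	fmt.Println(<-updates)           // prints "second"
}
```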

141
vendor/google.golang.org/grpc/call.go generated vendored

@ -1,33 +1,18 @@
/*
 *
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
 *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
 *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- *       copyright notice, this list of conditions and the following disclaimer
- *       in the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Google Inc. nor the names of its
- *       contributors may be used to endorse or promote products derived from
- *       this software without specific prior written permission.
+ *     http://www.apache.org/licenses/LICENSE-2.0
 *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
 *
 */
@ -36,13 +21,14 @@ package grpc
import ( import (
"bytes" "bytes"
"io" "io"
"math"
"time" "time"
"golang.org/x/net/context" "golang.org/x/net/context"
"golang.org/x/net/trace" "golang.org/x/net/trace"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats" "google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport" "google.golang.org/grpc/transport"
) )
@ -72,14 +58,17 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran
} }
} }
for { for {
if err = recv(p, dopts.codec, stream, dopts.dc, reply, math.MaxInt32, inPayload); err != nil { if c.maxReceiveMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
}
if err = recv(p, dopts.codec, stream, dopts.dc, reply, *c.maxReceiveMessageSize, inPayload); err != nil {
if err == io.EOF { if err == io.EOF {
break break
} }
return return
} }
} }
if inPayload != nil && err == io.EOF && stream.StatusCode() == codes.OK { if inPayload != nil && err == io.EOF && stream.Status().Code() == codes.OK {
// TODO in the current implementation, inTrailer may be handled before inPayload in some cases. // TODO in the current implementation, inTrailer may be handled before inPayload in some cases.
// Fix the order if necessary. // Fix the order if necessary.
dopts.copts.StatsHandler.HandleRPC(ctx, inPayload) dopts.copts.StatsHandler.HandleRPC(ctx, inPayload)
@ -89,11 +78,7 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran
} }
// sendRequest writes out various information of an RPC such as Context and Message. // sendRequest writes out various information of an RPC such as Context and Message.
func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) { func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, c *callInfo, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) {
stream, err := t.NewStream(ctx, callHdr)
if err != nil {
return nil, err
}
defer func() { defer func() {
if err != nil { if err != nil {
// If err is connection error, t will be closed, no need to close stream here. // If err is connection error, t will be closed, no need to close stream here.
@ -116,7 +101,13 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor,
} }
outBuf, err := encode(dopts.codec, args, compressor, cbuf, outPayload) outBuf, err := encode(dopts.codec, args, compressor, cbuf, outPayload)
if err != nil { if err != nil {
return nil, Errorf(codes.Internal, "grpc: %v", err) return err
}
if c.maxSendMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
}
if len(outBuf) > *c.maxSendMessageSize {
return Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(outBuf), *c.maxSendMessageSize)
} }
err = t.Write(stream, outBuf, opts) err = t.Write(stream, outBuf, opts)
if err == nil && outPayload != nil { if err == nil && outPayload != nil {
@ -127,10 +118,10 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor,
// does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following // does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following
// recvResponse to get the final status. // recvResponse to get the final status.
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
return nil, err return err
} }
// Sent successfully. // Sent successfully.
return stream, nil return nil
} }
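The new guard in sendRequest refuses to put an over-sized payload on the wire and reports ResourceExhausted instead of letting the server reject it. A hedged approximation of just that check, with hypothetical names and the 4 MiB client default used above:

package main

import "fmt"

// checkSendSize mirrors the guard added above: encode the message first,
// then refuse to write anything larger than the configured limit.
func checkSendSize(payload []byte, maxSendSize int) error {
	if len(payload) > maxSendSize {
		return fmt.Errorf("trying to send message larger than max (%d vs. %d)", len(payload), maxSendSize)
	}
	return nil
}

func main() {
	msg := make([]byte, 5<<20) // 5 MiB payload
	if err := checkSendSize(msg, 4<<20); err != nil {
		fmt.Println("rejected:", err) // the 4 MiB default would reject this
	}
}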
// Invoke sends the RPC request on the wire and returns after response is received. // Invoke sends the RPC request on the wire and returns after response is received.
@ -145,14 +136,18 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) { func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
c := defaultCallInfo c := defaultCallInfo
if mc, ok := cc.getMethodConfig(method); ok { mc := cc.GetMethodConfig(method)
c.failFast = !mc.WaitForReady if mc.WaitForReady != nil {
if mc.Timeout > 0 { c.failFast = !*mc.WaitForReady
}
if mc.Timeout != nil && *mc.Timeout >= 0 {
var cancel context.CancelFunc var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, mc.Timeout) ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
defer cancel() defer cancel()
} }
}
opts = append(cc.dopts.callOptions, opts...)
for _, o := range opts { for _, o := range opts {
if err := o.before(&c); err != nil { if err := o.before(&c); err != nil {
return toRPCErr(err) return toRPCErr(err)
@ -163,6 +158,10 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
o.after(&c) o.after(&c)
} }
}() }()
c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
if EnableTracing { if EnableTracing {
c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
defer c.traceInfo.tr.Finish() defer c.traceInfo.tr.Finish()
@ -179,26 +178,25 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
} }
}() }()
} }
ctx = newContextWithRPCInfo(ctx)
sh := cc.dopts.copts.StatsHandler sh := cc.dopts.copts.StatsHandler
if sh != nil { if sh != nil {
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method}) ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
begin := &stats.Begin{ begin := &stats.Begin{
Client: true, Client: true,
BeginTime: time.Now(), BeginTime: time.Now(),
FailFast: c.failFast, FailFast: c.failFast,
} }
sh.HandleRPC(ctx, begin) sh.HandleRPC(ctx, begin)
}
defer func() { defer func() {
if sh != nil {
end := &stats.End{ end := &stats.End{
Client: true, Client: true,
EndTime: time.Now(), EndTime: time.Now(),
Error: e, Error: e,
} }
sh.HandleRPC(ctx, end) sh.HandleRPC(ctx, end)
}
}() }()
}
topts := &transport.Options{ topts := &transport.Options{
Last: true, Last: true,
Delay: false, Delay: false,
@ -220,6 +218,9 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
if cc.dopts.cp != nil { if cc.dopts.cp != nil {
callHdr.SendCompress = cc.dopts.cp.Type() callHdr.SendCompress = cc.dopts.cp.Type()
} }
if c.creds != nil {
callHdr.Creds = c.creds
}
gopts := BalancerGetOptions{ gopts := BalancerGetOptions{
BlockingWait: !c.failFast, BlockingWait: !c.failFast,
@ -227,7 +228,7 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
t, put, err = cc.getTransport(ctx, gopts) t, put, err = cc.getTransport(ctx, gopts)
if err != nil { if err != nil {
// TODO(zhaoq): Probably revisit the error handling. // TODO(zhaoq): Probably revisit the error handling.
if _, ok := err.(*rpcError); ok { if _, ok := status.FromError(err); ok {
return err return err
} }
if err == errConnClosing || err == errConnUnavailable { if err == errConnClosing || err == errConnUnavailable {
@ -242,19 +243,38 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
if c.traceInfo.tr != nil { if c.traceInfo.tr != nil {
c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
} }
stream, err = sendRequest(ctx, cc.dopts, cc.dopts.cp, callHdr, t, args, topts) stream, err = t.NewStream(ctx, callHdr)
if err != nil { if err != nil {
if put != nil { if put != nil {
if _, ok := err.(transport.ConnectionError); ok {
// If error is connection error, transport was sending data on wire,
// and we are not sure if anything has been sent on wire.
// If error is not connection error, we are sure nothing has been sent.
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
}
put()
}
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
continue
}
return toRPCErr(err)
}
if peer, ok := peer.FromContext(stream.Context()); ok {
c.peer = peer
}
err = sendRequest(ctx, cc.dopts, cc.dopts.cp, &c, callHdr, stream, t, args, topts)
if err != nil {
if put != nil {
updateRPCInfoInContext(ctx, rpcInfo{
bytesSent: stream.BytesSent(),
bytesReceived: stream.BytesReceived(),
})
put() put()
put = nil
} }
// Retry a non-failfast RPC when // Retry a non-failfast RPC when
// i) there is a connection error; or // i) there is a connection error; or
// ii) the server started to drain before this RPC was initiated. // ii) the server started to drain before this RPC was initiated.
if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain { if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
if c.failFast {
return toRPCErr(err)
}
continue continue
} }
return toRPCErr(err) return toRPCErr(err)
@ -262,13 +282,13 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
err = recvResponse(ctx, cc.dopts, t, &c, stream, reply) err = recvResponse(ctx, cc.dopts, t, &c, stream, reply)
if err != nil { if err != nil {
if put != nil { if put != nil {
updateRPCInfoInContext(ctx, rpcInfo{
bytesSent: stream.BytesSent(),
bytesReceived: stream.BytesReceived(),
})
put() put()
put = nil
}
if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
if c.failFast {
return toRPCErr(err)
} }
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
continue continue
} }
return toRPCErr(err) return toRPCErr(err)
@ -278,9 +298,12 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
} }
t.CloseStream(stream, nil) t.CloseStream(stream, nil)
if put != nil { if put != nil {
updateRPCInfoInContext(ctx, rpcInfo{
bytesSent: stream.BytesSent(),
bytesReceived: stream.BytesReceived(),
})
put() put()
put = nil
} }
return Errorf(stream.StatusCode(), "%s", stream.StatusDesc()) return stream.Status().Err()
} }
} }
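The invoke loop above now retries only when the RPC is not fail-fast and the failure is a transport.ConnectionError or transport.ErrStreamDrain. A sketch of that decision using stand-in error types (the real transport package is not imported here):

package main

import (
	"errors"
	"fmt"
)

// connectionError stands in for transport.ConnectionError in this sketch.
type connectionError struct{ msg string }

func (e connectionError) Error() string { return e.msg }

// errStreamDrain stands in for transport.ErrStreamDrain.
var errStreamDrain = errors.New("the connection is draining")

// shouldRetry reproduces the condition used in the loop above: retry only
// non-failfast RPCs that failed because the connection broke or the server
// started draining before the RPC got going.
func shouldRetry(err error, failFast bool) bool {
	_, isConnErr := err.(connectionError)
	return (isConnErr || err == errStreamDrain) && !failFast
}

func main() {
	fmt.Println(shouldRetry(connectionError{"broken pipe"}, false))     // true: wait-for-ready RPC, retry
	fmt.Println(shouldRetry(errStreamDrain, true))                      // false: fail-fast RPC surfaces the error
	fmt.Println(shouldRetry(errors.New("internal codec error"), false)) // false: not retryable
}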

vendor/google.golang.org/grpc/clientconn.go generated vendored

@ -1,33 +1,18 @@
/* /*
* *
* Copyright 2014, Google Inc. * Copyright 2014 gRPC authors.
* All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Licensed under the Apache License, Version 2.0 (the "License");
* modification, are permitted provided that the following conditions are * you may not use this file except in compliance with the License.
* met: * You may obtain a copy of the License at
* *
* * Redistributions of source code must retain the above copyright * http://www.apache.org/licenses/LICENSE-2.0
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * Unless required by applicable law or agreed to in writing, software
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * distributed under the License is distributed on an "AS IS" BASIS,
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * See the License for the specific language governing permissions and
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * limitations under the License.
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* *
*/ */
@ -35,7 +20,6 @@ package grpc
import ( import (
"errors" "errors"
"fmt"
"net" "net"
"strings" "strings"
"sync" "sync"
@ -43,8 +27,10 @@ import (
"golang.org/x/net/context" "golang.org/x/net/context"
"golang.org/x/net/trace" "golang.org/x/net/trace"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog" "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/stats" "google.golang.org/grpc/stats"
"google.golang.org/grpc/transport" "google.golang.org/grpc/transport"
) )
@ -55,8 +41,7 @@ var (
ErrClientConnClosing = errors.New("grpc: the client connection is closing") ErrClientConnClosing = errors.New("grpc: the client connection is closing")
// ErrClientConnTimeout indicates that the ClientConn cannot establish the // ErrClientConnTimeout indicates that the ClientConn cannot establish the
// underlying connections within the specified timeout. // underlying connections within the specified timeout.
// DEPRECATED: Please use context.DeadlineExceeded instead. This error will be // DEPRECATED: Please use context.DeadlineExceeded instead.
// removed in Q1 2017.
ErrClientConnTimeout = errors.New("grpc: timed out when dialing") ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
// errNoTransportSecurity indicates that there is no transport security // errNoTransportSecurity indicates that there is no transport security
@ -78,7 +63,8 @@ var (
errConnClosing = errors.New("grpc: the connection is closing") errConnClosing = errors.New("grpc: the connection is closing")
// errConnUnavailable indicates that the connection is unavailable. // errConnUnavailable indicates that the connection is unavailable.
errConnUnavailable = errors.New("grpc: the connection is unavailable") errConnUnavailable = errors.New("grpc: the connection is unavailable")
errNoAddr = errors.New("grpc: there is no address available to dial") // errBalancerClosed indicates that the balancer is closed.
errBalancerClosed = errors.New("grpc: balancer is closed")
// minimum time to give a connection to complete // minimum time to give a connection to complete
minConnectTimeout = 20 * time.Second minConnectTimeout = 20 * time.Second
) )
@ -98,11 +84,45 @@ type dialOptions struct {
timeout time.Duration timeout time.Duration
scChan <-chan ServiceConfig scChan <-chan ServiceConfig
copts transport.ConnectOptions copts transport.ConnectOptions
callOptions []CallOption
} }
const (
defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
defaultClientMaxSendMessageSize = 1024 * 1024 * 4
)
// DialOption configures how we set up the connection. // DialOption configures how we set up the connection.
type DialOption func(*dialOptions) type DialOption func(*dialOptions)
// WithInitialWindowSize returns a DialOption which sets the value for initial window size on a stream.
// The lower bound for window size is 64K and any value smaller than that will be ignored.
func WithInitialWindowSize(s int32) DialOption {
return func(o *dialOptions) {
o.copts.InitialWindowSize = s
}
}
// WithInitialConnWindowSize returns a DialOption which sets the value for initial window size on a connection.
// The lower bound for window size is 64K and any value smaller than that will be ignored.
func WithInitialConnWindowSize(s int32) DialOption {
return func(o *dialOptions) {
o.copts.InitialConnWindowSize = s
}
}
// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
func WithMaxMsgSize(s int) DialOption {
return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
}
// WithDefaultCallOptions returns a DialOption which sets the default CallOptions for calls over the connection.
func WithDefaultCallOptions(cos ...CallOption) DialOption {
return func(o *dialOptions) {
o.callOptions = append(o.callOptions, cos...)
}
}
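With the receive limit now carried by a CallOption, callers typically raise it once at dial time via WithDefaultCallOptions; WithMaxMsgSize remains only as the deprecated spelling. A hedged usage sketch (the address and limit are illustrative):

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Raise the per-RPC receive limit from the 4 MiB client default to 16 MiB
	// for every call made on this connection.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(16*1024*1024)),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}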
// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling. // WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.
func WithCodec(c Codec) DialOption { func WithCodec(c Codec) DialOption {
return func(o *dialOptions) { return func(o *dialOptions) {
@ -194,7 +214,7 @@ func WithTransportCredentials(creds credentials.TransportCredentials) DialOption
} }
// WithPerRPCCredentials returns a DialOption which sets // WithPerRPCCredentials returns a DialOption which sets
// credentials which will place auth state on each outbound RPC. // credentials and places auth state on each outbound RPC.
func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
return func(o *dialOptions) { return func(o *dialOptions) {
o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
@ -203,6 +223,7 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn // WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn
// initially. This is valid if and only if WithBlock() is present. // initially. This is valid if and only if WithBlock() is present.
// Deprecated: use DialContext and context.WithTimeout instead.
func WithTimeout(d time.Duration) DialOption { func WithTimeout(d time.Duration) DialOption {
return func(o *dialOptions) { return func(o *dialOptions) {
o.timeout = d o.timeout = d
@ -231,7 +252,7 @@ func WithStatsHandler(h stats.Handler) DialOption {
} }
} }
// FailOnNonTempDialError returns a DialOption that specified if gRPC fails on non-temporary dial errors. // FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on non-temporary dial errors.
// If f is true, and dialer returns a non-temporary error, gRPC will fail the connection to the network // If f is true, and dialer returns a non-temporary error, gRPC will fail the connection to the network
// address and won't try to reconnect. // address and won't try to reconnect.
// The default value of FailOnNonTempDialError is false. // The default value of FailOnNonTempDialError is false.
@ -249,6 +270,13 @@ func WithUserAgent(s string) DialOption {
} }
} }
// WithKeepaliveParams returns a DialOption that specifies keepalive parameters for the client transport.
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
return func(o *dialOptions) {
o.copts.KeepaliveParams = kp
}
}
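WithKeepaliveParams threads client keepalive settings into the transport's ConnectOptions. A hedged usage sketch; the keepalive.ClientParameters field names are taken from the grpc-go keepalive package and should be treated as assumptions here, since that file is not part of this diff:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Ping the server after 30s of inactivity and give it 10s to answer
	// before the connection is considered dead.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:    30 * time.Second,
			Timeout: 10 * time.Second,
		}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}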
// WithUnaryInterceptor returns a DialOption that specifies the interceptor for unary RPCs. // WithUnaryInterceptor returns a DialOption that specifies the interceptor for unary RPCs.
func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
return func(o *dialOptions) { return func(o *dialOptions) {
@ -263,25 +291,52 @@ func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
} }
} }
// WithAuthority returns a DialOption that specifies the value to be used as
// the :authority pseudo-header. This value only works with WithInsecure and
// has no effect if TransportCredentials are present.
func WithAuthority(a string) DialOption {
return func(o *dialOptions) {
o.copts.Authority = a
}
}
// Dial creates a client connection to the given target. // Dial creates a client connection to the given target.
func Dial(target string, opts ...DialOption) (*ClientConn, error) { func Dial(target string, opts ...DialOption) (*ClientConn, error) {
return DialContext(context.Background(), target, opts...) return DialContext(context.Background(), target, opts...)
} }
// DialContext creates a client connection to the given target. ctx can be used to // DialContext creates a client connection to the given target. ctx can be used to
// cancel or expire the pending connecting. Once this function returns, the // cancel or expire the pending connection. Once this function returns, the
// cancellation and expiration of ctx will be noop. Users should call ClientConn.Close // cancellation and expiration of ctx will be noop. Users should call ClientConn.Close
// to terminate all the pending operations after this function returns. // to terminate all the pending operations after this function returns.
// This is the EXPERIMENTAL API.
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
cc := &ClientConn{ cc := &ClientConn{
target: target, target: target,
csMgr: &connectivityStateManager{},
conns: make(map[Address]*addrConn), conns: make(map[Address]*addrConn),
} }
cc.csEvltr = &connectivityStateEvaluator{csMgr: cc.csMgr}
cc.ctx, cc.cancel = context.WithCancel(context.Background()) cc.ctx, cc.cancel = context.WithCancel(context.Background())
for _, opt := range opts { for _, opt := range opts {
opt(&cc.dopts) opt(&cc.dopts)
} }
cc.mkp = cc.dopts.copts.KeepaliveParams
if cc.dopts.copts.Dialer == nil {
cc.dopts.copts.Dialer = newProxyDialer(
func(ctx context.Context, addr string) (net.Conn, error) {
return dialContext(ctx, "tcp", addr)
},
)
}
if cc.dopts.copts.UserAgent != "" {
cc.dopts.copts.UserAgent += " " + grpcUA
} else {
cc.dopts.copts.UserAgent = grpcUA
}
if cc.dopts.timeout > 0 { if cc.dopts.timeout > 0 {
var cancel context.CancelFunc var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout)
@ -300,15 +355,16 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
} }
}() }()
scSet := false
if cc.dopts.scChan != nil { if cc.dopts.scChan != nil {
// Wait for the initial service config. // Try to get an initial service config.
select { select {
case sc, ok := <-cc.dopts.scChan: case sc, ok := <-cc.dopts.scChan:
if ok { if ok {
cc.sc = sc cc.sc = sc
scSet = true
} }
case <-ctx.Done(): default:
return nil, ctx.Err()
} }
} }
// Set defaults. // Set defaults.
@ -321,54 +377,47 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
creds := cc.dopts.copts.TransportCredentials creds := cc.dopts.copts.TransportCredentials
if creds != nil && creds.Info().ServerName != "" { if creds != nil && creds.Info().ServerName != "" {
cc.authority = creds.Info().ServerName cc.authority = creds.Info().ServerName
} else if cc.dopts.insecure && cc.dopts.copts.Authority != "" {
cc.authority = cc.dopts.copts.Authority
} else { } else {
colonPos := strings.LastIndex(target, ":") cc.authority = target
if colonPos == -1 {
colonPos = len(target)
} }
cc.authority = target[:colonPos]
}
var ok bool
waitC := make(chan error, 1) waitC := make(chan error, 1)
go func() { go func() {
var addrs []Address defer close(waitC)
if cc.dopts.balancer == nil && cc.sc.LB != nil { if cc.dopts.balancer == nil && cc.sc.LB != nil {
cc.dopts.balancer = cc.sc.LB cc.dopts.balancer = cc.sc.LB
} }
if cc.dopts.balancer == nil { if cc.dopts.balancer != nil {
// Connect to target directly if balancer is nil.
addrs = append(addrs, Address{Addr: target})
} else {
var credsClone credentials.TransportCredentials var credsClone credentials.TransportCredentials
if creds != nil { if creds != nil {
credsClone = creds.Clone() credsClone = creds.Clone()
} }
config := BalancerConfig{ config := BalancerConfig{
DialCreds: credsClone, DialCreds: credsClone,
Dialer: cc.dopts.copts.Dialer,
} }
if err := cc.dopts.balancer.Start(target, config); err != nil { if err := cc.dopts.balancer.Start(target, config); err != nil {
waitC <- err waitC <- err
return return
} }
ch := cc.dopts.balancer.Notify() ch := cc.dopts.balancer.Notify()
if ch == nil { if ch != nil {
// There is no name resolver installed. if cc.dopts.block {
addrs = append(addrs, Address{Addr: target}) doneChan := make(chan struct{})
go cc.lbWatcher(doneChan)
<-doneChan
} else { } else {
addrs, ok = <-ch go cc.lbWatcher(nil)
if !ok || len(addrs) == 0 { }
waitC <- errNoAddr
return return
} }
} }
} // No balancer, or no resolver within the balancer. Connect directly.
for _, a := range addrs { if err := cc.resetAddrConn(Address{Addr: target}, cc.dopts.block, nil); err != nil {
if err := cc.resetAddrConn(a, false, nil); err != nil {
waitC <- err waitC <- err
return return
} }
}
close(waitC)
}() }()
select { select {
case <-ctx.Done(): case <-ctx.Done():
@ -378,50 +427,113 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
return nil, err return nil, err
} }
} }
if cc.dopts.scChan != nil && !scSet {
// If balancer is nil or balancer.Notify() is nil, ok will be false here. // Blocking wait for the initial service config.
// The lbWatcher goroutine will not be created. select {
case sc, ok := <-cc.dopts.scChan:
if ok { if ok {
go cc.lbWatcher() cc.sc = sc
}
case <-ctx.Done():
return nil, ctx.Err()
}
} }
if cc.dopts.scChan != nil { if cc.dopts.scChan != nil {
go cc.scWatcher() go cc.scWatcher()
} }
return cc, nil return cc, nil
} }
// ConnectivityState indicates the state of a client connection. // connectivityStateEvaluator gets updated by addrConns when their
type ConnectivityState int // states transition, based on which it evaluates the state of
// ClientConn.
// Note: This code will eventually sit in the balancer in the new design.
type connectivityStateEvaluator struct {
csMgr *connectivityStateManager
mu sync.Mutex
numReady uint64 // Number of addrConns in ready state.
numConnecting uint64 // Number of addrConns in connecting state.
numTransientFailure uint64 // Number of addrConns in transientFailure.
}
const ( // recordTransition records state change happening in every addrConn and based on
// Idle indicates the ClientConn is idle. // that it evaluates what state the ClientConn is in.
Idle ConnectivityState = iota // It can only transition between connectivity.Ready, connectivity.Connecting and connectivity.TransientFailure. Other states,
// Connecting indicates the ClienConn is connecting. // Idle and connectivity.Shutdown are transitioned into by ClientConn; in the begining of the connection
Connecting // before any addrConn is created ClientConn is in idle state. In the end when ClientConn
// Ready indicates the ClientConn is ready for work. // closes it is in connectivity.Shutdown state.
Ready // TODO Note that in later releases, a ClientConn with no activity will be put into an Idle state.
// TransientFailure indicates the ClientConn has seen a failure but expects to recover. func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) {
TransientFailure cse.mu.Lock()
// Shutdown indicates the ClientConn has started shutting down. defer cse.mu.Unlock()
Shutdown
)
func (s ConnectivityState) String() string { // Update counters.
switch s { for idx, state := range []connectivity.State{oldState, newState} {
case Idle: updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
return "IDLE" switch state {
case Connecting: case connectivity.Ready:
return "CONNECTING" cse.numReady += updateVal
case Ready: case connectivity.Connecting:
return "READY" cse.numConnecting += updateVal
case TransientFailure: case connectivity.TransientFailure:
return "TRANSIENT_FAILURE" cse.numTransientFailure += updateVal
case Shutdown:
return "SHUTDOWN"
default:
panic(fmt.Sprintf("unknown connectivity state: %d", s))
} }
}
// Evaluate.
if cse.numReady > 0 {
cse.csMgr.updateState(connectivity.Ready)
return
}
if cse.numConnecting > 0 {
cse.csMgr.updateState(connectivity.Connecting)
return
}
cse.csMgr.updateState(connectivity.TransientFailure)
}
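recordTransition leans on unsigned wraparound: for the old state (idx 0), 2*uint64(0) - 1 underflows to the maximum uint64, which behaves like -1 under modular addition, so the old state's counter is decremented while the new state's is incremented. A tiny sketch demonstrating the trick:

package main

import "fmt"

func main() {
	counts := map[string]uint64{"ready": 3}
	for idx, state := range []string{"ready", "connecting"} {
		// idx 0 (old state): 2*0-1 wraps to math.MaxUint64, i.e. -1 mod 2^64.
		// idx 1 (new state): 2*1-1 == +1.
		updateVal := 2*uint64(idx) - 1
		counts[state] += updateVal
	}
	fmt.Println(counts["ready"], counts["connecting"]) // 2 1
}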
// connectivityStateManager keeps the connectivity.State of ClientConn.
// This struct will eventually be exported so the balancers can access it.
type connectivityStateManager struct {
mu sync.Mutex
state connectivity.State
notifyChan chan struct{}
}
// updateState updates the connectivity.State of ClientConn.
// If there's a change it notifies goroutines waiting on state change to
// happen.
func (csm *connectivityStateManager) updateState(state connectivity.State) {
csm.mu.Lock()
defer csm.mu.Unlock()
if csm.state == connectivity.Shutdown {
return
}
if csm.state == state {
return
}
csm.state = state
if csm.notifyChan != nil {
// There are other goroutines waiting on this channel.
close(csm.notifyChan)
csm.notifyChan = nil
}
}
func (csm *connectivityStateManager) getState() connectivity.State {
csm.mu.Lock()
defer csm.mu.Unlock()
return csm.state
}
func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} {
csm.mu.Lock()
defer csm.mu.Unlock()
if csm.notifyChan == nil {
csm.notifyChan = make(chan struct{})
}
return csm.notifyChan
} }
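connectivityStateManager broadcasts a state change by closing notifyChan, waking every waiter at once, and lazily allocates a fresh channel for the next change. A small self-contained sketch of that close-to-broadcast idiom, with illustrative names:

package main

import (
	"fmt"
	"sync"
	"time"
)

type stateManager struct {
	mu         sync.Mutex
	state      string
	notifyChan chan struct{}
}

func (m *stateManager) set(state string) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.state == state {
		return
	}
	m.state = state
	if m.notifyChan != nil {
		close(m.notifyChan) // wake every goroutine blocked on the old channel
		m.notifyChan = nil
	}
}

func (m *stateManager) notify() <-chan struct{} {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.notifyChan == nil {
		m.notifyChan = make(chan struct{}) // lazily created per generation
	}
	return m.notifyChan
}

func main() {
	m := &stateManager{state: "CONNECTING"}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-m.notify()
			fmt.Println("waiter", id, "saw a state change")
		}(i)
	}
	time.Sleep(10 * time.Millisecond) // toy synchronization: let the waiters park
	m.set("READY")
	wg.Wait()
}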
// ClientConn represents a client connection to an RPC server. // ClientConn represents a client connection to an RPC server.
@ -432,13 +544,51 @@ type ClientConn struct {
target string target string
authority string authority string
dopts dialOptions dopts dialOptions
csMgr *connectivityStateManager
csEvltr *connectivityStateEvaluator // This will eventually be part of balancer.
mu sync.RWMutex mu sync.RWMutex
sc ServiceConfig sc ServiceConfig
conns map[Address]*addrConn conns map[Address]*addrConn
// Keepalive parameter can be updated if a GoAway is received.
mkp keepalive.ClientParameters
} }
func (cc *ClientConn) lbWatcher() { // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
// ctx expires. A true value is returned in the former case and false in the latter.
// This is an EXPERIMENTAL API.
func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
ch := cc.csMgr.getNotifyChan()
if cc.csMgr.getState() != sourceState {
return true
}
select {
case <-ctx.Done():
return false
case <-ch:
return true
}
}
// GetState returns the connectivity.State of ClientConn.
// This is an EXPERIMENTAL API.
func (cc *ClientConn) GetState() connectivity.State {
return cc.csMgr.getState()
}
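Combined, GetState and WaitForStateChange let a caller block until the connection settles. A hedged sketch of waiting for Ready (both methods are marked experimental above; the dial target is illustrative):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

// waitReady blocks until cc reports Ready or the deadline passes.
func waitReady(cc *grpc.ClientConn, d time.Duration) bool {
	ctx, cancel := context.WithTimeout(context.Background(), d)
	defer cancel()
	for {
		state := cc.GetState()
		if state == connectivity.Ready {
			return true
		}
		// Block until the state moves away from `state` or ctx expires.
		if !cc.WaitForStateChange(ctx, state) {
			return false
		}
	}
}

func main() {
	cc, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer cc.Close()
	log.Println("ready:", waitReady(cc, 5*time.Second))
}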
// lbWatcher watches the Notify channel of the balancer in cc and manages
// connections accordingly. If doneChan is not nil, it is closed after the
// first successful connection is made.
func (cc *ClientConn) lbWatcher(doneChan chan struct{}) {
defer func() {
// In case channel from cc.dopts.balancer.Notify() gets closed before a
// successful connection gets established, don't forget to notify the
// caller.
if doneChan != nil {
close(doneChan)
}
}()
for addrs := range cc.dopts.balancer.Notify() { for addrs := range cc.dopts.balancer.Notify() {
var ( var (
add []Address // Addresses need to setup connections. add []Address // Addresses need to setup connections.
@ -465,7 +615,19 @@ func (cc *ClientConn) lbWatcher() {
} }
cc.mu.Unlock() cc.mu.Unlock()
for _, a := range add { for _, a := range add {
cc.resetAddrConn(a, true, nil) var err error
if doneChan != nil {
err = cc.resetAddrConn(a, true, nil)
if err == nil {
close(doneChan)
doneChan = nil
}
} else {
err = cc.resetAddrConn(a, false, nil)
}
if err != nil {
grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
}
} }
for _, c := range del { for _, c := range del {
c.tearDown(errConnDrain) c.tearDown(errConnDrain)
@ -494,14 +656,18 @@ func (cc *ClientConn) scWatcher() {
// resetAddrConn creates an addrConn for addr and adds it to cc.conns. // resetAddrConn creates an addrConn for addr and adds it to cc.conns.
// If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason. // If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason.
// If tearDownErr is nil, errConnDrain will be used instead. // If tearDownErr is nil, errConnDrain will be used instead.
func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr error) error { //
// We should never need to replace an addrConn with a new one. This function is only used
// as newAddrConn to create new addrConn.
// TODO rename this function and clean up the code.
func (cc *ClientConn) resetAddrConn(addr Address, block bool, tearDownErr error) error {
ac := &addrConn{ ac := &addrConn{
cc: cc, cc: cc,
addr: addr, addr: addr,
dopts: cc.dopts, dopts: cc.dopts,
} }
ac.ctx, ac.cancel = context.WithCancel(cc.ctx) ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
ac.stateCV = sync.NewCond(&ac.mu) ac.csEvltr = cc.csEvltr
if EnableTracing { if EnableTracing {
ac.events = trace.NewEventLog("grpc.ClientConn", ac.addr.Addr) ac.events = trace.NewEventLog("grpc.ClientConn", ac.addr.Addr)
} }
@ -530,10 +696,7 @@ func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr err
cc.mu.Unlock() cc.mu.Unlock()
if stale != nil { if stale != nil {
// There is an addrConn alive on ac.addr already. This could be due to // There is an addrConn alive on ac.addr already. This could be due to
// 1) a buggy Balancer notifies duplicated Addresses; // a buggy Balancer that reports duplicated Addresses.
// 2) goaway was received, a new ac will replace the old ac.
// The old ac should be deleted from cc.conns, but the
// underlying transport should drain rather than close.
if tearDownErr == nil { if tearDownErr == nil {
// tearDownErr is nil if resetAddrConn is called by // tearDownErr is nil if resetAddrConn is called by
// 1) Dial // 1) Dial
@ -544,8 +707,7 @@ func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr err
stale.tearDown(tearDownErr) stale.tearDown(tearDownErr)
} }
} }
// skipWait may overwrite the decision in ac.dopts.block. if block {
if ac.dopts.block && !skipWait {
if err := ac.resetTransport(false); err != nil { if err := ac.resetTransport(false); err != nil {
if err != errConnClosing { if err != errConnClosing {
// Tear down ac and delete it from cc.conns. // Tear down ac and delete it from cc.conns.
@ -565,7 +727,7 @@ func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr err
// Start a goroutine connecting to the server asynchronously. // Start a goroutine connecting to the server asynchronously.
go func() { go func() {
if err := ac.resetTransport(false); err != nil { if err := ac.resetTransport(false); err != nil {
grpclog.Printf("Failed to dial %s: %v; please retry.", ac.addr.Addr, err) grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addr.Addr, err)
if err != errConnClosing { if err != errConnClosing {
// Keep this ac in cc.conns, to get the reason it's torn down. // Keep this ac in cc.conns, to get the reason it's torn down.
ac.tearDown(err) ac.tearDown(err)
@ -578,12 +740,23 @@ func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr err
return nil return nil
} }
// TODO: Avoid the locking here. // GetMethodConfig gets the method config of the input method.
func (cc *ClientConn) getMethodConfig(method string) (m MethodConfig, ok bool) { // If there's an exact match for input method (i.e. /service/method), we return
// the corresponding MethodConfig.
// If there isn't an exact match for the input method, we look for the default config
// under the service (i.e. /service/). If there is a default MethodConfig for
// the service, we return it.
// Otherwise, we return an empty MethodConfig.
func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
// TODO: Avoid the locking here.
cc.mu.RLock() cc.mu.RLock()
defer cc.mu.RUnlock() defer cc.mu.RUnlock()
m, ok = cc.sc.Methods[method] m, ok := cc.sc.Methods[method]
return if !ok {
i := strings.LastIndex(method, "/")
m, _ = cc.sc.Methods[method[:i+1]]
}
return m
} }
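GetMethodConfig falls back from the exact "/service/method" key to the per-service default keyed by everything up to and including the last '/'. A small sketch of that lookup over a plain map, with hypothetical config entries:

package main

import (
	"fmt"
	"strings"
)

type methodConfig struct{ waitForReady bool }

// lookup mirrors the fallback above: exact method first, then the
// service-wide default keyed by the prefix ending at the last '/'.
func lookup(methods map[string]methodConfig, method string) methodConfig {
	if m, ok := methods[method]; ok {
		return m
	}
	i := strings.LastIndex(method, "/")
	return methods[method[:i+1]]
}

func main() {
	methods := map[string]methodConfig{
		"/echo.Echo/":     {waitForReady: true},  // service default
		"/echo.Echo/Ping": {waitForReady: false}, // exact override
	}
	fmt.Println(lookup(methods, "/echo.Echo/Ping")) // {false}
	fmt.Println(lookup(methods, "/echo.Echo/Say"))  // {true} via the default
}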
func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) { func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) {
@ -624,6 +797,7 @@ func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions)
} }
if !ok { if !ok {
if put != nil { if put != nil {
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false})
put() put()
} }
return nil, nil, errConnClosing return nil, nil, errConnClosing
@ -631,6 +805,7 @@ func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions)
t, err := ac.wait(ctx, cc.dopts.balancer != nil, !opts.BlockingWait) t, err := ac.wait(ctx, cc.dopts.balancer != nil, !opts.BlockingWait)
if err != nil { if err != nil {
if put != nil { if put != nil {
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false})
put() put()
} }
return nil, nil, err return nil, nil, err
@ -649,6 +824,7 @@ func (cc *ClientConn) Close() error {
} }
conns := cc.conns conns := cc.conns
cc.conns = nil cc.conns = nil
cc.csMgr.updateState(connectivity.Shutdown)
cc.mu.Unlock() cc.mu.Unlock()
if cc.dopts.balancer != nil { if cc.dopts.balancer != nil {
cc.dopts.balancer.Close() cc.dopts.balancer.Close()
@ -669,9 +845,10 @@ type addrConn struct {
dopts dialOptions dopts dialOptions
events trace.EventLog events trace.EventLog
csEvltr *connectivityStateEvaluator
mu sync.Mutex mu sync.Mutex
state ConnectivityState state connectivity.State
stateCV *sync.Cond
down func(error) // the handler called when a connection is down. down func(error) // the handler called when a connection is down.
// ready is closed and becomes nil when a new transport is up or failed // ready is closed and becomes nil when a new transport is up or failed
// due to timeout. // due to timeout.
@ -682,6 +859,20 @@ type addrConn struct {
tearDownErr error tearDownErr error
} }
// adjustParams updates parameters used to create transports upon
// receiving a GoAway.
func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
switch r {
case transport.TooManyPings:
v := 2 * ac.dopts.copts.KeepaliveParams.Time
ac.cc.mu.Lock()
if v > ac.cc.mkp.Time {
ac.cc.mkp.Time = v
}
ac.cc.mu.Unlock()
}
}
// printf records an event in ac's event log, unless ac has been closed. // printf records an event in ac's event log, unless ac has been closed.
// REQUIRES ac.mu is held. // REQUIRES ac.mu is held.
func (ac *addrConn) printf(format string, a ...interface{}) { func (ac *addrConn) printf(format string, a ...interface{}) {
@ -698,62 +889,41 @@ func (ac *addrConn) errorf(format string, a ...interface{}) {
} }
} }
// getState returns the connectivity state of the Conn // resetTransport recreates a transport to the address for ac.
func (ac *addrConn) getState() ConnectivityState { // For the old transport:
// - if drain is true, it will be gracefully closed.
// - otherwise, it will be closed.
func (ac *addrConn) resetTransport(drain bool) error {
ac.mu.Lock() ac.mu.Lock()
defer ac.mu.Unlock() if ac.state == connectivity.Shutdown {
return ac.state
}
// waitForStateChange blocks until the state changes to something other than the sourceState.
func (ac *addrConn) waitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
ac.mu.Lock()
defer ac.mu.Unlock()
if sourceState != ac.state {
return ac.state, nil
}
done := make(chan struct{})
var err error
go func() {
select {
case <-ctx.Done():
ac.mu.Lock()
err = ctx.Err()
ac.stateCV.Broadcast()
ac.mu.Unlock()
case <-done:
}
}()
defer close(done)
for sourceState == ac.state {
ac.stateCV.Wait()
if err != nil {
return ac.state, err
}
}
return ac.state, nil
}
func (ac *addrConn) resetTransport(closeTransport bool) error {
for retries := 0; ; retries++ {
ac.mu.Lock()
ac.printf("connecting")
if ac.state == Shutdown {
// ac.tearDown(...) has been invoked.
ac.mu.Unlock() ac.mu.Unlock()
return errConnClosing return errConnClosing
} }
ac.printf("connecting")
if ac.down != nil { if ac.down != nil {
ac.down(downErrorf(false, true, "%v", errNetworkIO)) ac.down(downErrorf(false, true, "%v", errNetworkIO))
ac.down = nil ac.down = nil
} }
ac.state = Connecting oldState := ac.state
ac.stateCV.Broadcast() ac.state = connectivity.Connecting
ac.csEvltr.recordTransition(oldState, ac.state)
t := ac.transport t := ac.transport
ac.transport = nil
ac.mu.Unlock() ac.mu.Unlock()
if closeTransport && t != nil { if t != nil && !drain {
t.Close() t.Close()
} }
ac.cc.mu.RLock()
ac.dopts.copts.KeepaliveParams = ac.cc.mkp
ac.cc.mu.RUnlock()
for retries := 0; ; retries++ {
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
// ac.tearDown(...) has been invoked.
ac.mu.Unlock()
return errConnClosing
}
ac.mu.Unlock()
sleepTime := ac.dopts.bs.backoff(retries) sleepTime := ac.dopts.bs.backoff(retries)
timeout := minConnectTimeout timeout := minConnectTimeout
if timeout < sleepTime { if timeout < sleepTime {
@ -766,45 +936,51 @@ func (ac *addrConn) resetTransport(closeTransport bool) error {
Metadata: ac.addr.Metadata, Metadata: ac.addr.Metadata,
} }
newTransport, err := transport.NewClientTransport(ctx, sinfo, ac.dopts.copts) newTransport, err := transport.NewClientTransport(ctx, sinfo, ac.dopts.copts)
// Don't call cancel in success path due to a race in Go 1.6:
// https://github.com/golang/go/issues/15078.
if err != nil { if err != nil {
cancel() cancel()
if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() {
return err return err
} }
grpclog.Printf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, ac.addr) grpclog.Warningf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, ac.addr)
ac.mu.Lock() ac.mu.Lock()
if ac.state == Shutdown { if ac.state == connectivity.Shutdown {
// ac.tearDown(...) has been invoked. // ac.tearDown(...) has been invoked.
ac.mu.Unlock() ac.mu.Unlock()
return errConnClosing return errConnClosing
} }
ac.errorf("transient failure: %v", err) ac.errorf("transient failure: %v", err)
ac.state = TransientFailure oldState = ac.state
ac.stateCV.Broadcast() ac.state = connectivity.TransientFailure
ac.csEvltr.recordTransition(oldState, ac.state)
if ac.ready != nil { if ac.ready != nil {
close(ac.ready) close(ac.ready)
ac.ready = nil ac.ready = nil
} }
ac.mu.Unlock() ac.mu.Unlock()
closeTransport = false timer := time.NewTimer(sleepTime - time.Since(connectTime))
select { select {
case <-time.After(sleepTime - time.Since(connectTime)): case <-timer.C:
case <-ac.ctx.Done(): case <-ac.ctx.Done():
timer.Stop()
return ac.ctx.Err() return ac.ctx.Err()
} }
timer.Stop()
continue continue
} }
ac.mu.Lock() ac.mu.Lock()
ac.printf("ready") ac.printf("ready")
if ac.state == Shutdown { if ac.state == connectivity.Shutdown {
// ac.tearDown(...) has been invoked. // ac.tearDown(...) has been invoked.
ac.mu.Unlock() ac.mu.Unlock()
newTransport.Close() newTransport.Close()
return errConnClosing return errConnClosing
} }
ac.state = Ready oldState = ac.state
ac.stateCV.Broadcast() ac.state = connectivity.Ready
ac.csEvltr.recordTransition(oldState, ac.state)
ac.transport = newTransport ac.transport = newTransport
if ac.ready != nil { if ac.ready != nil {
close(ac.ready) close(ac.ready)
@ -836,43 +1012,59 @@ func (ac *addrConn) transportMonitor() {
} }
return return
case <-t.GoAway(): case <-t.GoAway():
// If GoAway happens without any network I/O error, ac is closed without shutting down the ac.adjustParams(t.GetGoAwayReason())
// underlying transport (the transport will be closed when all the pending RPCs finished or // If GoAway happens without any network I/O error, the underlying transport
// failed.). // will be gracefully closed, and a new transport will be created.
// If GoAway and some network I/O error happen concurrently, ac and its underlying transport // (The transport will be closed when all the pending RPCs finished or failed.)
// are closed. // If GoAway and some network I/O error happen concurrently, the underlying transport
// In both cases, a new ac is created. // will be closed, and a new transport will be created.
var drain bool
select { select {
case <-t.Error(): case <-t.Error():
ac.cc.resetAddrConn(ac.addr, true, errNetworkIO)
default: default:
ac.cc.resetAddrConn(ac.addr, true, errConnDrain) drain = true
}
if err := ac.resetTransport(drain); err != nil {
grpclog.Infof("get error from resetTransport %v, transportMonitor returning", err)
if err != errConnClosing {
// Keep this ac in cc.conns, to get the reason it's torn down.
ac.tearDown(err)
} }
return return
}
case <-t.Error(): case <-t.Error():
select { select {
case <-ac.ctx.Done(): case <-ac.ctx.Done():
t.Close() t.Close()
return return
case <-t.GoAway(): case <-t.GoAway():
ac.cc.resetAddrConn(ac.addr, true, errNetworkIO) ac.adjustParams(t.GetGoAwayReason())
if err := ac.resetTransport(false); err != nil {
grpclog.Infof("get error from resetTransport %v, transportMonitor returning", err)
if err != errConnClosing {
// Keep this ac in cc.conns, to get the reason it's torn down.
ac.tearDown(err)
}
return return
}
default: default:
} }
ac.mu.Lock() ac.mu.Lock()
if ac.state == Shutdown { if ac.state == connectivity.Shutdown {
// ac has been shutdown. // ac has been shutdown.
ac.mu.Unlock() ac.mu.Unlock()
return return
} }
ac.state = TransientFailure oldState := ac.state
ac.stateCV.Broadcast() ac.state = connectivity.TransientFailure
ac.csEvltr.recordTransition(oldState, ac.state)
ac.mu.Unlock() ac.mu.Unlock()
if err := ac.resetTransport(true); err != nil { if err := ac.resetTransport(false); err != nil {
grpclog.Infof("get error from resetTransport %v, transportMonitor returning", err)
ac.mu.Lock() ac.mu.Lock()
ac.printf("transport exiting: %v", err) ac.printf("transport exiting: %v", err)
ac.mu.Unlock() ac.mu.Unlock()
grpclog.Printf("grpc: addrConn.transportMonitor exits due to: %v", err) grpclog.Warningf("grpc: addrConn.transportMonitor exits due to: %v", err)
if err != errConnClosing { if err != errConnClosing {
// Keep this ac in cc.conns, to get the reason it's torn down. // Keep this ac in cc.conns, to get the reason it's torn down.
ac.tearDown(err) ac.tearDown(err)
@ -884,12 +1076,12 @@ func (ac *addrConn) transportMonitor() {
} }
// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or // wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or
// iv) transport is in TransientFailure and there is a balancer/failfast is true. // iv) transport is in connectivity.TransientFailure and there is a balancer/failfast is true.
func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) { func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) {
for { for {
ac.mu.Lock() ac.mu.Lock()
switch { switch {
case ac.state == Shutdown: case ac.state == connectivity.Shutdown:
if failfast || !hasBalancer { if failfast || !hasBalancer {
// RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr. // RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr.
err := ac.tearDownErr err := ac.tearDownErr
@ -898,11 +1090,11 @@ func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (trans
} }
ac.mu.Unlock() ac.mu.Unlock()
return nil, errConnClosing return nil, errConnClosing
case ac.state == Ready: case ac.state == connectivity.Ready:
ct := ac.transport ct := ac.transport
ac.mu.Unlock() ac.mu.Unlock()
return ct, nil return ct, nil
case ac.state == TransientFailure: case ac.state == connectivity.TransientFailure:
if failfast || hasBalancer { if failfast || hasBalancer {
ac.mu.Unlock() ac.mu.Unlock()
return nil, errConnUnavailable return nil, errConnUnavailable
@ -944,12 +1136,13 @@ func (ac *addrConn) tearDown(err error) {
// address removal and GoAway. // address removal and GoAway.
ac.transport.GracefulClose() ac.transport.GracefulClose()
} }
if ac.state == Shutdown { if ac.state == connectivity.Shutdown {
return return
} }
ac.state = Shutdown oldState := ac.state
ac.state = connectivity.Shutdown
ac.tearDownErr = err ac.tearDownErr = err
ac.stateCV.Broadcast() ac.csEvltr.recordTransition(oldState, ac.state)
if ac.events != nil { if ac.events != nil {
ac.events.Finish() ac.events.Finish()
ac.events = nil ac.events = nil
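resetTransport now uses an explicit time.Timer so a cancelled addrConn context interrupts the backoff sleep promptly and the timer is stopped on that path. A generic sketch of the pattern with a toy backoff schedule:

package main

import (
	"context"
	"fmt"
	"time"
)

// sleepOrCancel waits for d unless ctx is cancelled first, making sure the
// timer is stopped in both branches (as in resetTransport above).
func sleepOrCancel(ctx context.Context, d time.Duration) error {
	timer := time.NewTimer(d)
	select {
	case <-timer.C:
	case <-ctx.Done():
		timer.Stop()
		return ctx.Err()
	}
	timer.Stop()
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	for retries := 0; ; retries++ {
		backoff := time.Duration(retries+1) * 20 * time.Millisecond // toy schedule
		if err := sleepOrCancel(ctx, backoff); err != nil {
			fmt.Println("giving up:", err)
			return
		}
		fmt.Println("retry", retries)
	}
}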

104
vendor/google.golang.org/grpc/codec.go generated vendored Normal file

@ -0,0 +1,104 @@
/*
*
* Copyright 2014 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"math"
"sync"
"github.com/golang/protobuf/proto"
)
// Codec defines the interface gRPC uses to encode and decode messages.
// Note that implementations of this interface must be thread safe;
// a Codec's methods can be called from concurrent goroutines.
type Codec interface {
// Marshal returns the wire format of v.
Marshal(v interface{}) ([]byte, error)
// Unmarshal parses the wire format into v.
Unmarshal(data []byte, v interface{}) error
// String returns the name of the Codec implementation. The returned
// string will be used as part of content type in transmission.
String() string
}
// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
type protoCodec struct {
}
type cachedProtoBuffer struct {
lastMarshaledSize uint32
proto.Buffer
}
func capToMaxInt32(val int) uint32 {
if val > math.MaxInt32 {
return uint32(math.MaxInt32)
}
return uint32(val)
}
func (p protoCodec) marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
protoMsg := v.(proto.Message)
newSlice := make([]byte, 0, cb.lastMarshaledSize)
cb.SetBuf(newSlice)
cb.Reset()
if err := cb.Marshal(protoMsg); err != nil {
return nil, err
}
out := cb.Bytes()
cb.lastMarshaledSize = capToMaxInt32(len(out))
return out, nil
}
func (p protoCodec) Marshal(v interface{}) ([]byte, error) {
cb := protoBufferPool.Get().(*cachedProtoBuffer)
out, err := p.marshal(v, cb)
// put back buffer and lose the ref to the slice
cb.SetBuf(nil)
protoBufferPool.Put(cb)
return out, err
}
func (p protoCodec) Unmarshal(data []byte, v interface{}) error {
cb := protoBufferPool.Get().(*cachedProtoBuffer)
cb.SetBuf(data)
v.(proto.Message).Reset()
err := cb.Unmarshal(v.(proto.Message))
cb.SetBuf(nil)
protoBufferPool.Put(cb)
return err
}
func (protoCodec) String() string {
return "proto"
}
var (
protoBufferPool = &sync.Pool{
New: func() interface{} {
return &cachedProtoBuffer{
Buffer: proto.Buffer{},
lastMarshaledSize: 16,
}
},
}
)
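Any type satisfying the three-method Codec interface can replace protoCodec via the WithCodec dial option shown earlier. A toy JSON codec as a hedged illustration (protoCodec additionally reuses marshal buffers through a sync.Pool, which this sketch omits):

package main

import (
	"encoding/json"
	"log"

	"google.golang.org/grpc"
)

// jsonCodec is a toy Codec implementation; the default protoCodec above does
// the same job for protobuf messages, with pooled buffers for reuse.
type jsonCodec struct{}

func (jsonCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (jsonCodec) String() string                             { return "json" }

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithCodec(jsonCodec{}), // replace the default proto codec
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}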

vendor/google.golang.org/grpc/codes/codes.go generated vendored

@ -1,33 +1,18 @@
/* /*
* *
* Copyright 2014, Google Inc. * Copyright 2014 gRPC authors.
* All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Licensed under the Apache License, Version 2.0 (the "License");
* modification, are permitted provided that the following conditions are * you may not use this file except in compliance with the License.
* met: * You may obtain a copy of the License at
* *
* * Redistributions of source code must retain the above copyright * http://www.apache.org/licenses/LICENSE-2.0
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * Unless required by applicable law or agreed to in writing, software
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * distributed under the License is distributed on an "AS IS" BASIS,
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * See the License for the specific language governing permissions and
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * limitations under the License.
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* *
*/ */
@ -44,7 +29,7 @@ const (
// OK is returned on success. // OK is returned on success.
OK Code = 0 OK Code = 0
// Canceled indicates the operation was cancelled (typically by the caller). // Canceled indicates the operation was canceled (typically by the caller).
Canceled Code = 1 Canceled Code = 1
// Unknown error. An example of where this error may be returned is // Unknown error. An example of where this error may be returned is

vendor/google.golang.org/grpc/connectivity/connectivity.go generated vendored Normal file

@ -0,0 +1,72 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package connectivity defines connectivity semantics.
// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md.
// All APIs in this package are experimental.
package connectivity
import (
"golang.org/x/net/context"
"google.golang.org/grpc/grpclog"
)
// State indicates the state of connectivity.
// It can be the state of a ClientConn or SubConn.
type State int
func (s State) String() string {
switch s {
case Idle:
return "IDLE"
case Connecting:
return "CONNECTING"
case Ready:
return "READY"
case TransientFailure:
return "TRANSIENT_FAILURE"
case Shutdown:
return "SHUTDOWN"
default:
grpclog.Errorf("unknown connectivity state: %d", s)
return "Invalid-State"
}
}
const (
// Idle indicates the ClientConn is idle.
Idle State = iota
// Connecting indicates the ClientConn is connecting.
Connecting
// Ready indicates the ClientConn is ready for work.
Ready
// TransientFailure indicates the ClientConn has seen a failure but expects to recover.
TransientFailure
// Shutdown indicates the ClientConn has started shutting down.
Shutdown
)
// Reporter reports the connectivity states.
type Reporter interface {
// CurrentState returns the current state of the reporter.
CurrentState() State
// WaitForStateChange blocks until the reporter's state is different from the given state,
// and returns true.
// It returns false if <-ctx.Done() can proceed (the ctx timed out or was canceled).
WaitForStateChange(context.Context, State) bool
}
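A minimal sketch (not part of the vendored change), assuming only the Reporter interface shown above: the package name, function name, and five-second deadline are illustrative placeholders.

package example

import (
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc/connectivity"
)

// waitUntilNotIdle blocks until r reports a state other than Idle, or until
// a five-second deadline expires, and returns the last observed state.
func waitUntilNotIdle(r connectivity.Reporter) connectivity.State {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	s := r.CurrentState()
	for s == connectivity.Idle {
		if !r.WaitForStateChange(ctx, s) {
			break // the context timed out or was canceled
		}
		s = r.CurrentState()
	}
	return s
}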

View File

@ -1,33 +1,18 @@
/* /*
* *
* Copyright 2014, Google Inc. * Copyright 2014 gRPC authors.
* All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Licensed under the Apache License, Version 2.0 (the "License");
* modification, are permitted provided that the following conditions are * you may not use this file except in compliance with the License.
* met: * You may obtain a copy of the License at
* *
* * Redistributions of source code must retain the above copyright * http://www.apache.org/licenses/LICENSE-2.0
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * Unless required by applicable law or agreed to in writing, software
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * distributed under the License is distributed on an "AS IS" BASIS,
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * See the License for the specific language governing permissions and
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * limitations under the License.
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* *
*/ */
@ -102,6 +87,10 @@ type TransportCredentials interface {
// authentication protocol on rawConn for clients. It returns the authenticated // authentication protocol on rawConn for clients. It returns the authenticated
// connection and the corresponding auth information about the connection. // connection and the corresponding auth information about the connection.
// Implementations must use the provided context to implement timely cancellation. // Implementations must use the provided context to implement timely cancellation.
// gRPC will try to reconnect if the error returned is a temporary error
// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
// If the returned error is a wrapper error, implementations should make sure that
// the error implements Temporary() to have the correct retry behaviors.
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
// ServerHandshake does the authentication handshake for servers. It returns // ServerHandshake does the authentication handshake for servers. It returns
// the authenticated connection and the corresponding auth information about // the authenticated connection and the corresponding auth information about
@ -192,14 +181,14 @@ func NewTLS(c *tls.Config) TransportCredentials {
return tc return tc
} }
// NewClientTLSFromCert constructs a TLS from the input certificate for client. // NewClientTLSFromCert constructs TLS credentials from the input certificate for client.
// serverNameOverride is for testing only. If set to a non empty string, // serverNameOverride is for testing only. If set to a non empty string,
// it will override the virtual host name of authority (e.g. :authority header field) in requests. // it will override the virtual host name of authority (e.g. :authority header field) in requests.
func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
} }
// NewClientTLSFromFile constructs a TLS from the input certificate file for client. // NewClientTLSFromFile constructs TLS credentials from the input certificate file for client.
// serverNameOverride is for testing only. If set to a non empty string, // serverNameOverride is for testing only. If set to a non empty string,
// it will override the virtual host name of authority (e.g. :authority header field) in requests. // it will override the virtual host name of authority (e.g. :authority header field) in requests.
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
@ -214,12 +203,12 @@ func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredent
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
} }
// NewServerTLSFromCert constructs a TLS from the input certificate for server. // NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
} }
// NewServerTLSFromFile constructs a TLS from the input certificate file and key // NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
// file for server. // file for server.
func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
cert, err := tls.LoadX509KeyPair(certFile, keyFile) cert, err := tls.LoadX509KeyPair(certFile, keyFile)
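The NewClientTLSFromFile and NewServerTLSFromFile helpers above are typically consumed through grpc.Dial or grpc.NewServer options. A minimal client-side sketch follows; it is illustrative rather than part of this change, and the CA bundle path and server address are placeholders.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// "ca.pem" and "localhost:50051" are placeholder values for illustration.
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
	if err != nil {
		log.Fatalf("failed to load CA certificate: %v", err)
	}
	conn, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("failed to dial: %v", err)
	}
	defer conn.Close()
}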

View File

@ -1,35 +1,21 @@
// +build go1.7 // +build go1.7
// +build !go1.8
/* /*
* *
* Copyright 2016, Google Inc. * Copyright 2016 gRPC authors.
* All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Licensed under the Apache License, Version 2.0 (the "License");
* modification, are permitted provided that the following conditions are * you may not use this file except in compliance with the License.
* met: * You may obtain a copy of the License at
* *
* * Redistributions of source code must retain the above copyright * http://www.apache.org/licenses/LICENSE-2.0
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * Unless required by applicable law or agreed to in writing, software
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * distributed under the License is distributed on an "AS IS" BASIS,
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * See the License for the specific language governing permissions and
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * limitations under the License.
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* *
*/ */
@ -44,8 +30,6 @@ import (
// contains a mutex and must not be copied. // contains a mutex and must not be copied.
// //
// If cfg is nil, a new zero tls.Config is returned. // If cfg is nil, a new zero tls.Config is returned.
//
// TODO replace this function with official clone function.
func cloneTLSConfig(cfg *tls.Config) *tls.Config { func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil { if cfg == nil {
return &tls.Config{} return &tls.Config{}

View File

@ -0,0 +1,38 @@
// +build go1.8
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package credentials
import (
"crypto/tls"
)
// cloneTLSConfig returns a shallow clone of the exported
// fields of cfg, ignoring the unexported sync.Once, which
// contains a mutex and must not be copied.
//
// If cfg is nil, a new zero tls.Config is returned.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
}
return cfg.Clone()
}

View File

@ -2,34 +2,19 @@
/* /*
* *
* Copyright 2016, Google Inc. * Copyright 2016 gRPC authors.
* All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Licensed under the Apache License, Version 2.0 (the "License");
* modification, are permitted provided that the following conditions are * you may not use this file except in compliance with the License.
* met: * You may obtain a copy of the License at
* *
* * Redistributions of source code must retain the above copyright * http://www.apache.org/licenses/LICENSE-2.0
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * Unless required by applicable law or agreed to in writing, software
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * distributed under the License is distributed on an "AS IS" BASIS,
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * See the License for the specific language governing permissions and
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * limitations under the License.
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* *
*/ */
@ -44,8 +29,6 @@ import (
// contains a mutex and must not be copied. // contains a mutex and must not be copied.
// //
// If cfg is nil, a new zero tls.Config is returned. // If cfg is nil, a new zero tls.Config is returned.
//
// TODO replace this function with official clone function.
func cloneTLSConfig(cfg *tls.Config) *tls.Config { func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil { if cfg == nil {
return &tls.Config{} return &tls.Config{}

20
vendor/google.golang.org/grpc/doc.go generated vendored
View File

@ -1,6 +1,24 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* /*
Package grpc implements an RPC system called gRPC. Package grpc implements an RPC system called gRPC.
See www.grpc.io for more information about gRPC. See grpc.io for more information about gRPC.
*/ */
package grpc // import "google.golang.org/grpc" package grpc // import "google.golang.org/grpc"

98
vendor/google.golang.org/grpc/go16.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
// +build go1.6,!go1.7
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"fmt"
"io"
"net"
"net/http"
"os"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport"
)
// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
}
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
req.Cancel = ctx.Done()
if err := req.Write(conn); err != nil {
return fmt.Errorf("failed to write the HTTP request: %v", err)
}
return nil
}
// toRPCErr converts an error into an error from the status package.
func toRPCErr(err error) error {
if _, ok := status.FromError(err); ok {
return err
}
switch e := err.(type) {
case transport.StreamError:
return status.Error(e.Code, e.Desc)
case transport.ConnectionError:
return status.Error(codes.Unavailable, e.Desc)
default:
switch err {
case context.DeadlineExceeded:
return status.Error(codes.DeadlineExceeded, err.Error())
case context.Canceled:
return status.Error(codes.Canceled, err.Error())
case ErrClientConnClosing:
return status.Error(codes.FailedPrecondition, err.Error())
}
}
return status.Error(codes.Unknown, err.Error())
}
// convertCode converts a standard Go error into its canonical code. Note that
// this is only used to translate the error returned by the server applications.
func convertCode(err error) codes.Code {
switch err {
case nil:
return codes.OK
case io.EOF:
return codes.OutOfRange
case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
return codes.FailedPrecondition
case os.ErrInvalid:
return codes.InvalidArgument
case context.Canceled:
return codes.Canceled
case context.DeadlineExceeded:
return codes.DeadlineExceeded
}
switch {
case os.IsExist(err):
return codes.AlreadyExists
case os.IsNotExist(err):
return codes.NotFound
case os.IsPermission(err):
return codes.PermissionDenied
}
return codes.Unknown
}
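toRPCErr and convertCode above only matter to callers through the statuses they produce. Here is a sketch (not part of this change; the function name and messages are placeholders) of inspecting such an error with the public status and codes packages.

package example

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// describeRPCError reports whether err carries a gRPC status and, if so,
// whether it represents a client-side deadline expiry.
func describeRPCError(err error) string {
	st, ok := status.FromError(err)
	if !ok {
		return fmt.Sprintf("not a gRPC status error: %v", err)
	}
	if st.Code() == codes.DeadlineExceeded {
		return "the call timed out on the client side"
	}
	return fmt.Sprintf("RPC failed with code %s: %s", st.Code(), st.Message())
}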

98
vendor/google.golang.org/grpc/go17.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
// +build go1.7
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"context"
"io"
"net"
"net/http"
"os"
netctx "golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport"
)
// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{}).DialContext(ctx, network, address)
}
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
req = req.WithContext(ctx)
if err := req.Write(conn); err != nil {
return err
}
return nil
}
// toRPCErr converts an error into an error from the status package.
func toRPCErr(err error) error {
if _, ok := status.FromError(err); ok {
return err
}
switch e := err.(type) {
case transport.StreamError:
return status.Error(e.Code, e.Desc)
case transport.ConnectionError:
return status.Error(codes.Unavailable, e.Desc)
default:
switch err {
case context.DeadlineExceeded, netctx.DeadlineExceeded:
return status.Error(codes.DeadlineExceeded, err.Error())
case context.Canceled, netctx.Canceled:
return status.Error(codes.Canceled, err.Error())
case ErrClientConnClosing:
return status.Error(codes.FailedPrecondition, err.Error())
}
}
return status.Error(codes.Unknown, err.Error())
}
// convertCode converts a standard Go error into its canonical code. Note that
// this is only used to translate the error returned by the server applications.
func convertCode(err error) codes.Code {
switch err {
case nil:
return codes.OK
case io.EOF:
return codes.OutOfRange
case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
return codes.FailedPrecondition
case os.ErrInvalid:
return codes.InvalidArgument
case context.Canceled, netctx.Canceled:
return codes.Canceled
case context.DeadlineExceeded, netctx.DeadlineExceeded:
return codes.DeadlineExceeded
}
switch {
case os.IsExist(err):
return codes.AlreadyExists
case os.IsNotExist(err):
return codes.NotFound
case os.IsPermission(err):
return codes.PermissionDenied
}
return codes.Unknown
}
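Both go16.go and go17.go exist only to pick the right dialing primitive for the toolchain; callers that need a custom transport dial can still supply one via the public WithDialer option. A hypothetical sketch, with a placeholder address and an IPv4-only dialer chosen purely for illustration:

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
)

func main() {
	dialer := func(addr string, timeout time.Duration) (net.Conn, error) {
		// Restrict to IPv4 as an example of customizing how connections are made.
		return net.DialTimeout("tcp4", addr, timeout)
	}
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure(), grpc.WithDialer(dialer))
	if err != nil {
		log.Fatalf("failed to dial: %v", err)
	}
	defer conn.Close()
}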

737
vendor/google.golang.org/grpc/grpclb.go generated vendored Normal file
View File

@ -0,0 +1,737 @@
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"errors"
"fmt"
"math/rand"
"net"
"sync"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/naming"
)
// Client API for LoadBalancer service.
// Mostly copied from the generated pb.go file to avoid a circular dependency.
type loadBalancerClient struct {
cc *ClientConn
}
func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption) (*balanceLoadClientStream, error) {
desc := &StreamDesc{
StreamName: "BalanceLoad",
ServerStreams: true,
ClientStreams: true,
}
stream, err := NewClientStream(ctx, desc, c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
if err != nil {
return nil, err
}
x := &balanceLoadClientStream{stream}
return x, nil
}
type balanceLoadClientStream struct {
ClientStream
}
func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) {
m := new(lbpb.LoadBalanceResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// NewGRPCLBBalancer creates a grpclb load balancer.
func NewGRPCLBBalancer(r naming.Resolver) Balancer {
return &balancer{
r: r,
}
}
type remoteBalancerInfo struct {
addr string
// the server name used for authentication with the remote LB server.
name string
}
// grpclbAddrInfo consists of the information of a backend server.
type grpclbAddrInfo struct {
addr Address
connected bool
// dropForRateLimiting indicates whether this particular request should be
// dropped by the client for rate limiting.
dropForRateLimiting bool
// dropForLoadBalancing indicates whether this particular request should be
// dropped by the client for load balancing.
dropForLoadBalancing bool
}
type balancer struct {
r naming.Resolver
target string
mu sync.Mutex
seq int // a sequence number to make sure addrCh does not get stale addresses.
w naming.Watcher
addrCh chan []Address
rbs []remoteBalancerInfo
addrs []*grpclbAddrInfo
next int
waitCh chan struct{}
done bool
expTimer *time.Timer
rand *rand.Rand
clientStats lbpb.ClientStats
}
func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error {
updates, err := w.Next()
if err != nil {
grpclog.Warningf("grpclb: failed to get next addr update from watcher: %v", err)
return err
}
b.mu.Lock()
defer b.mu.Unlock()
if b.done {
return ErrClientConnClosing
}
for _, update := range updates {
switch update.Op {
case naming.Add:
var exist bool
for _, v := range b.rbs {
// TODO: Is the same addr with different server name a different balancer?
if update.Addr == v.addr {
exist = true
break
}
}
if exist {
continue
}
md, ok := update.Metadata.(*naming.AddrMetadataGRPCLB)
if !ok {
// TODO: Revisit the handling here and maybe introduce a fallback mechanism.
grpclog.Errorf("The name resolution contains unexpected metadata %v", update.Metadata)
continue
}
switch md.AddrType {
case naming.Backend:
// TODO: Revisit the handling here and maybe introduce a fallback mechanism.
grpclog.Errorf("The name resolution does not give grpclb addresses")
continue
case naming.GRPCLB:
b.rbs = append(b.rbs, remoteBalancerInfo{
addr: update.Addr,
name: md.ServerName,
})
default:
grpclog.Errorf("Received unknow address type %d", md.AddrType)
continue
}
case naming.Delete:
for i, v := range b.rbs {
if update.Addr == v.addr {
copy(b.rbs[i:], b.rbs[i+1:])
b.rbs = b.rbs[:len(b.rbs)-1]
break
}
}
default:
grpclog.Errorf("Unknown update.Op %v", update.Op)
}
}
// TODO: Fall back to the basic round-robin load balancing if the resulting address is
// not a load balancer.
select {
case <-ch:
default:
}
ch <- b.rbs
return nil
}
func (b *balancer) serverListExpire(seq int) {
b.mu.Lock()
defer b.mu.Unlock()
// TODO: gRPC internals do not clear the connections when the server list is stale.
// This means RPCs will keep using the existing server list until b receives a new
// server list, even though the current list has expired. Revisit this behavior later.
if b.done || seq < b.seq {
return
}
b.next = 0
b.addrs = nil
// Ask grpc internals to close all the corresponding connections.
b.addrCh <- nil
}
func convertDuration(d *lbpb.Duration) time.Duration {
if d == nil {
return 0
}
return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
}
func (b *balancer) processServerList(l *lbpb.ServerList, seq int) {
if l == nil {
return
}
servers := l.GetServers()
expiration := convertDuration(l.GetExpirationInterval())
var (
sl []*grpclbAddrInfo
addrs []Address
)
for _, s := range servers {
md := metadata.Pairs("lb-token", s.LoadBalanceToken)
ip := net.IP(s.IpAddress)
ipStr := ip.String()
if ip.To4() == nil {
// Add square brackets to ipv6 addresses, otherwise net.Dial() and
// net.SplitHostPort() will return a "too many colons" error.
ipStr = fmt.Sprintf("[%s]", ipStr)
}
addr := Address{
Addr: fmt.Sprintf("%s:%d", ipStr, s.Port),
Metadata: &md,
}
sl = append(sl, &grpclbAddrInfo{
addr: addr,
dropForRateLimiting: s.DropForRateLimiting,
dropForLoadBalancing: s.DropForLoadBalancing,
})
addrs = append(addrs, addr)
}
b.mu.Lock()
defer b.mu.Unlock()
if b.done || seq < b.seq {
return
}
if len(sl) > 0 {
// reset b.next to 0 when replacing the server list.
b.next = 0
b.addrs = sl
b.addrCh <- addrs
if b.expTimer != nil {
b.expTimer.Stop()
b.expTimer = nil
}
if expiration > 0 {
b.expTimer = time.AfterFunc(expiration, func() {
b.serverListExpire(seq)
})
}
}
return
}
func (b *balancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
case <-done:
return
}
b.mu.Lock()
stats := b.clientStats
b.clientStats = lbpb.ClientStats{} // Clear the stats.
b.mu.Unlock()
t := time.Now()
stats.Timestamp = &lbpb.Timestamp{
Seconds: t.Unix(),
Nanos: int32(t.Nanosecond()),
}
if err := s.Send(&lbpb.LoadBalanceRequest{
LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{
ClientStats: &stats,
},
}); err != nil {
grpclog.Errorf("grpclb: failed to send load report: %v", err)
return
}
}
}
func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := lbc.BalanceLoad(ctx)
if err != nil {
grpclog.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
return
}
b.mu.Lock()
if b.done {
b.mu.Unlock()
return
}
b.mu.Unlock()
initReq := &lbpb.LoadBalanceRequest{
LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
InitialRequest: &lbpb.InitialLoadBalanceRequest{
Name: b.target,
},
},
}
if err := stream.Send(initReq); err != nil {
grpclog.Errorf("grpclb: failed to send init request: %v", err)
// TODO: backoff on retry?
return true
}
reply, err := stream.Recv()
if err != nil {
grpclog.Errorf("grpclb: failed to recv init response: %v", err)
// TODO: backoff on retry?
return true
}
initResp := reply.GetInitialResponse()
if initResp == nil {
grpclog.Errorf("grpclb: reply from remote balancer did not include initial response.")
return
}
// TODO: Support delegation.
if initResp.LoadBalancerDelegate != "" {
// delegation
grpclog.Errorf("TODO: Delegation is not supported yet.")
return
}
streamDone := make(chan struct{})
defer close(streamDone)
b.mu.Lock()
b.clientStats = lbpb.ClientStats{} // Clear client stats.
b.mu.Unlock()
if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
go b.sendLoadReport(stream, d, streamDone)
}
// Retrieve the server list.
for {
reply, err := stream.Recv()
if err != nil {
grpclog.Errorf("grpclb: failed to recv server list: %v", err)
break
}
b.mu.Lock()
if b.done || seq < b.seq {
b.mu.Unlock()
return
}
b.seq++ // tick when receiving a new list of servers.
seq = b.seq
b.mu.Unlock()
if serverList := reply.GetServerList(); serverList != nil {
b.processServerList(serverList, seq)
}
}
return true
}
func (b *balancer) Start(target string, config BalancerConfig) error {
b.rand = rand.New(rand.NewSource(time.Now().Unix()))
// TODO: Fall back to the basic direct connection if there is no name resolver.
if b.r == nil {
return errors.New("there is no name resolver installed")
}
b.target = target
b.mu.Lock()
if b.done {
b.mu.Unlock()
return ErrClientConnClosing
}
b.addrCh = make(chan []Address)
w, err := b.r.Resolve(target)
if err != nil {
b.mu.Unlock()
grpclog.Errorf("grpclb: failed to resolve address: %v, err: %v", target, err)
return err
}
b.w = w
b.mu.Unlock()
balancerAddrsCh := make(chan []remoteBalancerInfo, 1)
// Spawn a goroutine to monitor the name resolution of remote load balancer.
go func() {
for {
if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil {
grpclog.Warningf("grpclb: the naming watcher stops working due to %v.\n", err)
close(balancerAddrsCh)
return
}
}
}()
// Spawn a goroutine to talk to the remote load balancer.
go func() {
var (
cc *ClientConn
// ccError is closed when there is an error in the current cc.
// A new rb should be picked from rbs and connected.
ccError chan struct{}
rb *remoteBalancerInfo
rbs []remoteBalancerInfo
rbIdx int
)
defer func() {
if ccError != nil {
select {
case <-ccError:
default:
close(ccError)
}
}
if cc != nil {
cc.Close()
}
}()
for {
var ok bool
select {
case rbs, ok = <-balancerAddrsCh:
if !ok {
return
}
foundIdx := -1
if rb != nil {
for i, trb := range rbs {
if trb == *rb {
foundIdx = i
break
}
}
}
if foundIdx >= 0 {
if foundIdx >= 1 {
// Move the address in use to the beginning of the list.
b.rbs[0], b.rbs[foundIdx] = b.rbs[foundIdx], b.rbs[0]
rbIdx = 0
}
continue // If found, don't dial new cc.
} else if len(rbs) > 0 {
// Pick a random one from the list, instead of always using the first one.
if l := len(rbs); l > 1 && rb != nil {
tmpIdx := b.rand.Intn(l - 1)
b.rbs[0], b.rbs[tmpIdx] = b.rbs[tmpIdx], b.rbs[0]
}
rbIdx = 0
rb = &rbs[0]
} else {
// foundIdx < 0 && len(rbs) <= 0.
rb = nil
}
case <-ccError:
ccError = nil
if rbIdx < len(rbs)-1 {
rbIdx++
rb = &rbs[rbIdx]
} else {
rb = nil
}
}
if rb == nil {
continue
}
if cc != nil {
cc.Close()
}
// Talk to the remote load balancer to get the server list.
var (
err error
dopts []DialOption
)
if creds := config.DialCreds; creds != nil {
if rb.name != "" {
if err := creds.OverrideServerName(rb.name); err != nil {
grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v", err)
continue
}
}
dopts = append(dopts, WithTransportCredentials(creds))
} else {
dopts = append(dopts, WithInsecure())
}
if dialer := config.Dialer; dialer != nil {
// WithDialer takes a different type of function, so we instead use a special DialOption here.
dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer })
}
ccError = make(chan struct{})
cc, err = Dial(rb.addr, dopts...)
if err != nil {
grpclog.Warningf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err)
close(ccError)
continue
}
b.mu.Lock()
b.seq++ // tick when getting a new balancer address
seq := b.seq
b.next = 0
b.mu.Unlock()
go func(cc *ClientConn, ccError chan struct{}) {
lbc := &loadBalancerClient{cc}
b.callRemoteBalancer(lbc, seq)
cc.Close()
select {
case <-ccError:
default:
close(ccError)
}
}(cc, ccError)
}
}()
return nil
}
func (b *balancer) down(addr Address, err error) {
b.mu.Lock()
defer b.mu.Unlock()
for _, a := range b.addrs {
if addr == a.addr {
a.connected = false
break
}
}
}
func (b *balancer) Up(addr Address) func(error) {
b.mu.Lock()
defer b.mu.Unlock()
if b.done {
return nil
}
var cnt int
for _, a := range b.addrs {
if a.addr == addr {
if a.connected {
return nil
}
a.connected = true
}
if a.connected && !a.dropForRateLimiting && !a.dropForLoadBalancing {
cnt++
}
}
// addr is the only one which is connected. Notify the Get() callers who are blocking.
if cnt == 1 && b.waitCh != nil {
close(b.waitCh)
b.waitCh = nil
}
return func(err error) {
b.down(addr, err)
}
}
func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
var ch chan struct{}
b.mu.Lock()
if b.done {
b.mu.Unlock()
err = ErrClientConnClosing
return
}
seq := b.seq
defer func() {
if err != nil {
return
}
put = func() {
s, ok := rpcInfoFromContext(ctx)
if !ok {
return
}
b.mu.Lock()
defer b.mu.Unlock()
if b.done || seq < b.seq {
return
}
b.clientStats.NumCallsFinished++
if !s.bytesSent {
b.clientStats.NumCallsFinishedWithClientFailedToSend++
} else if s.bytesReceived {
b.clientStats.NumCallsFinishedKnownReceived++
}
}
}()
b.clientStats.NumCallsStarted++
if len(b.addrs) > 0 {
if b.next >= len(b.addrs) {
b.next = 0
}
next := b.next
for {
a := b.addrs[next]
next = (next + 1) % len(b.addrs)
if a.connected {
if !a.dropForRateLimiting && !a.dropForLoadBalancing {
addr = a.addr
b.next = next
b.mu.Unlock()
return
}
if !opts.BlockingWait {
b.next = next
if a.dropForLoadBalancing {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
} else if a.dropForRateLimiting {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithDropForRateLimiting++
}
b.mu.Unlock()
err = Errorf(codes.Unavailable, "%s drops requests", a.addr.Addr)
return
}
}
if next == b.next {
// Has iterated over all the possible addresses but none is connected.
break
}
}
}
if !opts.BlockingWait {
if len(b.addrs) == 0 {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithClientFailedToSend++
b.mu.Unlock()
err = Errorf(codes.Unavailable, "there is no address available")
return
}
// Returns the next addr on b.addrs for a failfast RPC.
addr = b.addrs[b.next].addr
b.next++
b.mu.Unlock()
return
}
// Wait on b.waitCh for non-failfast RPCs.
if b.waitCh == nil {
ch = make(chan struct{})
b.waitCh = ch
} else {
ch = b.waitCh
}
b.mu.Unlock()
for {
select {
case <-ctx.Done():
b.mu.Lock()
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithClientFailedToSend++
b.mu.Unlock()
err = ctx.Err()
return
case <-ch:
b.mu.Lock()
if b.done {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithClientFailedToSend++
b.mu.Unlock()
err = ErrClientConnClosing
return
}
if len(b.addrs) > 0 {
if b.next >= len(b.addrs) {
b.next = 0
}
next := b.next
for {
a := b.addrs[next]
next = (next + 1) % len(b.addrs)
if a.connected {
if !a.dropForRateLimiting && !a.dropForLoadBalancing {
addr = a.addr
b.next = next
b.mu.Unlock()
return
}
if !opts.BlockingWait {
b.next = next
if a.dropForLoadBalancing {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
} else if a.dropForRateLimiting {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithDropForRateLimiting++
}
b.mu.Unlock()
err = Errorf(codes.Unavailable, "drop requests for the addreess %s", a.addr.Addr)
return
}
}
if next == b.next {
// Has iterated over all the possible addresses but none is connected.
break
}
}
}
// The newly added addr got removed by Down() again.
if b.waitCh == nil {
ch = make(chan struct{})
b.waitCh = ch
} else {
ch = b.waitCh
}
b.mu.Unlock()
}
}
}
func (b *balancer) Notify() <-chan []Address {
return b.addrCh
}
func (b *balancer) Close() error {
b.mu.Lock()
defer b.mu.Unlock()
if b.done {
return errBalancerClosed
}
b.done = true
if b.expTimer != nil {
b.expTimer.Stop()
}
if b.waitCh != nil {
close(b.waitCh)
}
if b.addrCh != nil {
close(b.addrCh)
}
if b.w != nil {
b.w.Close()
}
return nil
}
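NewGRPCLBBalancer is the only exported entry point in this file; everything else is driven from Start once the balancer is handed to grpc.Dial. A hypothetical usage sketch follows: the DNS resolver constructor, target, and WithInsecure are assumptions for illustration only, and any naming.Resolver that yields naming.GRPCLB addresses would work.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/naming"
)

func main() {
	// naming.NewDNSResolver is assumed here purely for illustration.
	r, err := naming.NewDNSResolver()
	if err != nil {
		log.Fatalf("failed to create resolver: %v", err)
	}
	conn, err := grpc.Dial(
		"my-service.example.com:443", // placeholder target
		grpc.WithInsecure(),          // illustration only; use transport credentials in practice
		grpc.WithBalancer(grpc.NewGRPCLBBalancer(r)),
	)
	if err != nil {
		log.Fatalf("failed to dial: %v", err)
	}
	defer conn.Close()
}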

View File

@ -0,0 +1,629 @@
// Code generated by protoc-gen-go.
// source: grpclb.proto
// DO NOT EDIT!
/*
Package grpc_lb_v1 is a generated protocol buffer package.
It is generated from these files:
grpclb.proto
It has these top-level messages:
Duration
Timestamp
LoadBalanceRequest
InitialLoadBalanceRequest
ClientStats
LoadBalanceResponse
InitialLoadBalanceResponse
ServerList
Server
*/
package grpc_lb_v1
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Duration struct {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive.
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}
func (m *Duration) Reset() { *m = Duration{} }
func (m *Duration) String() string { return proto.CompactTextString(m) }
func (*Duration) ProtoMessage() {}
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Duration) GetSeconds() int64 {
if m != nil {
return m.Seconds
}
return 0
}
func (m *Duration) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
type Timestamp struct {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}
func (m *Timestamp) Reset() { *m = Timestamp{} }
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *Timestamp) GetSeconds() int64 {
if m != nil {
return m.Seconds
}
return 0
}
func (m *Timestamp) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
type LoadBalanceRequest struct {
// Types that are valid to be assigned to LoadBalanceRequestType:
// *LoadBalanceRequest_InitialRequest
// *LoadBalanceRequest_ClientStats
LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"`
}
func (m *LoadBalanceRequest) Reset() { *m = LoadBalanceRequest{} }
func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) }
func (*LoadBalanceRequest) ProtoMessage() {}
func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
type isLoadBalanceRequest_LoadBalanceRequestType interface {
isLoadBalanceRequest_LoadBalanceRequestType()
}
type LoadBalanceRequest_InitialRequest struct {
InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,oneof"`
}
type LoadBalanceRequest_ClientStats struct {
ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,oneof"`
}
func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {}
func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {}
func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType {
if m != nil {
return m.LoadBalanceRequestType
}
return nil
}
func (m *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest {
if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok {
return x.InitialRequest
}
return nil
}
func (m *LoadBalanceRequest) GetClientStats() *ClientStats {
if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok {
return x.ClientStats
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*LoadBalanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _LoadBalanceRequest_OneofMarshaler, _LoadBalanceRequest_OneofUnmarshaler, _LoadBalanceRequest_OneofSizer, []interface{}{
(*LoadBalanceRequest_InitialRequest)(nil),
(*LoadBalanceRequest_ClientStats)(nil),
}
}
func _LoadBalanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*LoadBalanceRequest)
// load_balance_request_type
switch x := m.LoadBalanceRequestType.(type) {
case *LoadBalanceRequest_InitialRequest:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.InitialRequest); err != nil {
return err
}
case *LoadBalanceRequest_ClientStats:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.ClientStats); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("LoadBalanceRequest.LoadBalanceRequestType has unexpected type %T", x)
}
return nil
}
func _LoadBalanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*LoadBalanceRequest)
switch tag {
case 1: // load_balance_request_type.initial_request
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(InitialLoadBalanceRequest)
err := b.DecodeMessage(msg)
m.LoadBalanceRequestType = &LoadBalanceRequest_InitialRequest{msg}
return true, err
case 2: // load_balance_request_type.client_stats
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ClientStats)
err := b.DecodeMessage(msg)
m.LoadBalanceRequestType = &LoadBalanceRequest_ClientStats{msg}
return true, err
default:
return false, nil
}
}
func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) {
m := msg.(*LoadBalanceRequest)
// load_balance_request_type
switch x := m.LoadBalanceRequestType.(type) {
case *LoadBalanceRequest_InitialRequest:
s := proto.Size(x.InitialRequest)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *LoadBalanceRequest_ClientStats:
s := proto.Size(x.ClientStats)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type InitialLoadBalanceRequest struct {
// Name of the load balanced service (e.g., balancer.service.com);
// its length should be less than 256 bytes.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}
func (m *InitialLoadBalanceRequest) Reset() { *m = InitialLoadBalanceRequest{} }
func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) }
func (*InitialLoadBalanceRequest) ProtoMessage() {}
func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *InitialLoadBalanceRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
// Contains client level statistics that are useful to load balancing. Each
// count except the timestamp should be reset to zero after reporting the stats.
type ClientStats struct {
// The timestamp of generating the report.
Timestamp *Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"`
// The total number of RPCs that started.
NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted" json:"num_calls_started,omitempty"`
// The total number of RPCs that finished.
NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished" json:"num_calls_finished,omitempty"`
// The total number of RPCs that were dropped by the client because of rate
// limiting.
NumCallsFinishedWithDropForRateLimiting int64 `protobuf:"varint,4,opt,name=num_calls_finished_with_drop_for_rate_limiting,json=numCallsFinishedWithDropForRateLimiting" json:"num_calls_finished_with_drop_for_rate_limiting,omitempty"`
// The total number of RPCs that were dropped by the client because of load
// balancing.
NumCallsFinishedWithDropForLoadBalancing int64 `protobuf:"varint,5,opt,name=num_calls_finished_with_drop_for_load_balancing,json=numCallsFinishedWithDropForLoadBalancing" json:"num_calls_finished_with_drop_for_load_balancing,omitempty"`
// The total number of RPCs that failed to reach a server except dropped RPCs.
NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend" json:"num_calls_finished_with_client_failed_to_send,omitempty"`
// The total number of RPCs that finished and are known to have been received
// by a server.
NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived" json:"num_calls_finished_known_received,omitempty"`
}
func (m *ClientStats) Reset() { *m = ClientStats{} }
func (m *ClientStats) String() string { return proto.CompactTextString(m) }
func (*ClientStats) ProtoMessage() {}
func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *ClientStats) GetTimestamp() *Timestamp {
if m != nil {
return m.Timestamp
}
return nil
}
func (m *ClientStats) GetNumCallsStarted() int64 {
if m != nil {
return m.NumCallsStarted
}
return 0
}
func (m *ClientStats) GetNumCallsFinished() int64 {
if m != nil {
return m.NumCallsFinished
}
return 0
}
func (m *ClientStats) GetNumCallsFinishedWithDropForRateLimiting() int64 {
if m != nil {
return m.NumCallsFinishedWithDropForRateLimiting
}
return 0
}
func (m *ClientStats) GetNumCallsFinishedWithDropForLoadBalancing() int64 {
if m != nil {
return m.NumCallsFinishedWithDropForLoadBalancing
}
return 0
}
func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 {
if m != nil {
return m.NumCallsFinishedWithClientFailedToSend
}
return 0
}
func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 {
if m != nil {
return m.NumCallsFinishedKnownReceived
}
return 0
}
type LoadBalanceResponse struct {
// Types that are valid to be assigned to LoadBalanceResponseType:
// *LoadBalanceResponse_InitialResponse
// *LoadBalanceResponse_ServerList
LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"`
}
func (m *LoadBalanceResponse) Reset() { *m = LoadBalanceResponse{} }
func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) }
func (*LoadBalanceResponse) ProtoMessage() {}
func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
type isLoadBalanceResponse_LoadBalanceResponseType interface {
isLoadBalanceResponse_LoadBalanceResponseType()
}
type LoadBalanceResponse_InitialResponse struct {
InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,oneof"`
}
type LoadBalanceResponse_ServerList struct {
ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,oneof"`
}
func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {}
func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {}
func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType {
if m != nil {
return m.LoadBalanceResponseType
}
return nil
}
func (m *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse {
if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok {
return x.InitialResponse
}
return nil
}
func (m *LoadBalanceResponse) GetServerList() *ServerList {
if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok {
return x.ServerList
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*LoadBalanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _LoadBalanceResponse_OneofMarshaler, _LoadBalanceResponse_OneofUnmarshaler, _LoadBalanceResponse_OneofSizer, []interface{}{
(*LoadBalanceResponse_InitialResponse)(nil),
(*LoadBalanceResponse_ServerList)(nil),
}
}
func _LoadBalanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*LoadBalanceResponse)
// load_balance_response_type
switch x := m.LoadBalanceResponseType.(type) {
case *LoadBalanceResponse_InitialResponse:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.InitialResponse); err != nil {
return err
}
case *LoadBalanceResponse_ServerList:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.ServerList); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("LoadBalanceResponse.LoadBalanceResponseType has unexpected type %T", x)
}
return nil
}
func _LoadBalanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*LoadBalanceResponse)
switch tag {
case 1: // load_balance_response_type.initial_response
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(InitialLoadBalanceResponse)
err := b.DecodeMessage(msg)
m.LoadBalanceResponseType = &LoadBalanceResponse_InitialResponse{msg}
return true, err
case 2: // load_balance_response_type.server_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ServerList)
err := b.DecodeMessage(msg)
m.LoadBalanceResponseType = &LoadBalanceResponse_ServerList{msg}
return true, err
default:
return false, nil
}
}
func _LoadBalanceResponse_OneofSizer(msg proto.Message) (n int) {
m := msg.(*LoadBalanceResponse)
// load_balance_response_type
switch x := m.LoadBalanceResponseType.(type) {
case *LoadBalanceResponse_InitialResponse:
s := proto.Size(x.InitialResponse)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *LoadBalanceResponse_ServerList:
s := proto.Size(x.ServerList)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type InitialLoadBalanceResponse struct {
// This is an application layer redirect that indicates the client should use
// the specified server for load balancing. When this field is non-empty in
// the response, the client should open a separate connection to the
// load_balancer_delegate and call the BalanceLoad method. Its length should
// be less than 64 bytes.
LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate" json:"load_balancer_delegate,omitempty"`
// This interval defines how often the client should send the client stats
// to the load balancer. Stats should only be reported when the duration is
// positive.
ClientStatsReportInterval *Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval" json:"client_stats_report_interval,omitempty"`
}
func (m *InitialLoadBalanceResponse) Reset() { *m = InitialLoadBalanceResponse{} }
func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) }
func (*InitialLoadBalanceResponse) ProtoMessage() {}
func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string {
if m != nil {
return m.LoadBalancerDelegate
}
return ""
}
func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *Duration {
if m != nil {
return m.ClientStatsReportInterval
}
return nil
}
type ServerList struct {
// Contains a list of servers selected by the load balancer. The list will
// be updated when server resolutions change or as needed to balance load
// across more servers. The client should consume the server list in order
// unless instructed otherwise via the client_config.
Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"`
// Indicates the amount of time that the client should consider this server
// list as valid. It may be considered stale after waiting this interval of
// time after receiving the list. If the interval is not positive, the
// client can assume the list is valid until the next list is received.
ExpirationInterval *Duration `protobuf:"bytes,3,opt,name=expiration_interval,json=expirationInterval" json:"expiration_interval,omitempty"`
}
func (m *ServerList) Reset() { *m = ServerList{} }
func (m *ServerList) String() string { return proto.CompactTextString(m) }
func (*ServerList) ProtoMessage() {}
func (*ServerList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *ServerList) GetServers() []*Server {
if m != nil {
return m.Servers
}
return nil
}
func (m *ServerList) GetExpirationInterval() *Duration {
if m != nil {
return m.ExpirationInterval
}
return nil
}
// Contains server information. When none of the [drop_for_*] fields are true,
// use the other fields. When drop_for_rate_limiting is true, ignore all other
// fields. Use drop_for_load_balancing only when it is true and
// drop_for_rate_limiting is false.
type Server struct {
// A resolved address for the server, serialized in network-byte-order. It may
// either be an IPv4 or IPv6 address.
IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
// A resolved port number for the server.
Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"`
// An opaque but printable token given to the frontend for each pick. All
// frontend requests for that pick must include the token in its initial
// metadata. The token is used by the backend to verify the request and to
// allow the backend to report load to the gRPC LB system.
//
// Its length is variable but less than 50 bytes.
LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken" json:"load_balance_token,omitempty"`
// Indicates whether this particular request should be dropped by the client
// for rate limiting.
DropForRateLimiting bool `protobuf:"varint,4,opt,name=drop_for_rate_limiting,json=dropForRateLimiting" json:"drop_for_rate_limiting,omitempty"`
// Indicates whether this particular request should be dropped by the client
// for load balancing.
DropForLoadBalancing bool `protobuf:"varint,5,opt,name=drop_for_load_balancing,json=dropForLoadBalancing" json:"drop_for_load_balancing,omitempty"`
}
func (m *Server) Reset() { *m = Server{} }
func (m *Server) String() string { return proto.CompactTextString(m) }
func (*Server) ProtoMessage() {}
func (*Server) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func (m *Server) GetIpAddress() []byte {
if m != nil {
return m.IpAddress
}
return nil
}
func (m *Server) GetPort() int32 {
if m != nil {
return m.Port
}
return 0
}
func (m *Server) GetLoadBalanceToken() string {
if m != nil {
return m.LoadBalanceToken
}
return ""
}
func (m *Server) GetDropForRateLimiting() bool {
if m != nil {
return m.DropForRateLimiting
}
return false
}
func (m *Server) GetDropForLoadBalancing() bool {
if m != nil {
return m.DropForLoadBalancing
}
return false
}
func init() {
proto.RegisterType((*Duration)(nil), "grpc.lb.v1.Duration")
proto.RegisterType((*Timestamp)(nil), "grpc.lb.v1.Timestamp")
proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest")
proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest")
proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats")
proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse")
proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse")
proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList")
proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server")
}
func init() { proto.RegisterFile("grpclb.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 733 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x39,
0x14, 0x66, 0x36, 0xfc, 0xe5, 0x24, 0x5a, 0x58, 0x93, 0x85, 0xc0, 0xc2, 0x2e, 0x1b, 0xa9, 0x34,
0xaa, 0x68, 0x68, 0x43, 0x7b, 0xd1, 0x9f, 0x9b, 0x02, 0x45, 0x41, 0xe5, 0xa2, 0x72, 0xa8, 0x7a,
0x55, 0x59, 0x4e, 0xc6, 0x80, 0xc5, 0xc4, 0x9e, 0xda, 0x4e, 0x68, 0x2f, 0x7b, 0xd9, 0x47, 0xe9,
0x63, 0x54, 0x7d, 0x86, 0xbe, 0x4f, 0x65, 0x7b, 0x26, 0x33, 0x90, 0x1f, 0xd4, 0xbb, 0xf1, 0xf1,
0x77, 0xbe, 0xf3, 0xf9, 0xd8, 0xdf, 0x19, 0x28, 0x5f, 0xa8, 0xb8, 0x1b, 0x75, 0x1a, 0xb1, 0x92,
0x46, 0x22, 0xb0, 0xab, 0x46, 0xd4, 0x69, 0x0c, 0x1e, 0xd7, 0x9e, 0xc3, 0xe2, 0x51, 0x5f, 0x51,
0xc3, 0xa5, 0x40, 0x55, 0x58, 0xd0, 0xac, 0x2b, 0x45, 0xa8, 0xab, 0xc1, 0x76, 0x50, 0x2f, 0xe0,
0x74, 0x89, 0x2a, 0x30, 0x27, 0xa8, 0x90, 0xba, 0xfa, 0xc7, 0x76, 0x50, 0x9f, 0xc3, 0x7e, 0x51,
0x7b, 0x01, 0xc5, 0x33, 0xde, 0x63, 0xda, 0xd0, 0x5e, 0xfc, 0xdb, 0xc9, 0xdf, 0x03, 0x40, 0xa7,
0x92, 0x86, 0x07, 0x34, 0xa2, 0xa2, 0xcb, 0x30, 0xfb, 0xd8, 0x67, 0xda, 0xa0, 0xb7, 0xb0, 0xc4,
0x05, 0x37, 0x9c, 0x46, 0x44, 0xf9, 0x90, 0xa3, 0x2b, 0x35, 0xef, 0x35, 0x32, 0xd5, 0x8d, 0x13,
0x0f, 0x19, 0xcd, 0x6f, 0xcd, 0xe0, 0x3f, 0x93, 0xfc, 0x94, 0xf1, 0x25, 0x94, 0xbb, 0x11, 0x67,
0xc2, 0x10, 0x6d, 0xa8, 0xf1, 0x2a, 0x4a, 0xcd, 0xb5, 0x3c, 0xdd, 0xa1, 0xdb, 0x6f, 0xdb, 0xed,
0xd6, 0x0c, 0x2e, 0x75, 0xb3, 0xe5, 0xc1, 0x3f, 0xb0, 0x1e, 0x49, 0x1a, 0x92, 0x8e, 0x2f, 0x93,
0x8a, 0x22, 0xe6, 0x73, 0xcc, 0x6a, 0x7b, 0xb0, 0x3e, 0x51, 0x09, 0x42, 0x30, 0x2b, 0x68, 0x8f,
0x39, 0xf9, 0x45, 0xec, 0xbe, 0x6b, 0x5f, 0x67, 0xa1, 0x94, 0x2b, 0x86, 0xf6, 0xa1, 0x68, 0xd2,
0x0e, 0x26, 0xe7, 0xfc, 0x3b, 0x2f, 0x6c, 0xd8, 0x5e, 0x9c, 0xe1, 0xd0, 0x03, 0xf8, 0x4b, 0xf4,
0x7b, 0xa4, 0x4b, 0xa3, 0x48, 0xdb, 0x33, 0x29, 0xc3, 0x42, 0x77, 0xaa, 0x02, 0x5e, 0x12, 0xfd,
0xde, 0xa1, 0x8d, 0xb7, 0x7d, 0x18, 0xed, 0x02, 0xca, 0xb0, 0xe7, 0x5c, 0x70, 0x7d, 0xc9, 0xc2,
0x6a, 0xc1, 0x81, 0x97, 0x53, 0xf0, 0x71, 0x12, 0x47, 0x04, 0x1a, 0xa3, 0x68, 0x72, 0xcd, 0xcd,
0x25, 0x09, 0x95, 0x8c, 0xc9, 0xb9, 0x54, 0x44, 0x51, 0xc3, 0x48, 0xc4, 0x7b, 0xdc, 0x70, 0x71,
0x51, 0x9d, 0x75, 0x4c, 0xf7, 0x6f, 0x33, 0xbd, 0xe7, 0xe6, 0xf2, 0x48, 0xc9, 0xf8, 0x58, 0x2a,
0x4c, 0x0d, 0x3b, 0x4d, 0xe0, 0x88, 0xc2, 0xde, 0x9d, 0x05, 0x72, 0xed, 0xb6, 0x15, 0xe6, 0x5c,
0x85, 0xfa, 0x94, 0x0a, 0x59, 0xef, 0x6d, 0x89, 0x0f, 0xf0, 0x70, 0x52, 0x89, 0xe4, 0x19, 0x9c,
0x53, 0x1e, 0xb1, 0x90, 0x18, 0x49, 0x34, 0x13, 0x61, 0x75, 0xde, 0x15, 0xd8, 0x19, 0x57, 0xc0,
0x5f, 0xd5, 0xb1, 0xc3, 0x9f, 0xc9, 0x36, 0x13, 0x21, 0x6a, 0xc1, 0xff, 0x63, 0xe8, 0xaf, 0x84,
0xbc, 0x16, 0x44, 0xb1, 0x2e, 0xe3, 0x03, 0x16, 0x56, 0x17, 0x1c, 0xe5, 0xd6, 0x6d, 0xca, 0x37,
0x16, 0x85, 0x13, 0x50, 0xed, 0x47, 0x00, 0x2b, 0x37, 0x9e, 0x8d, 0x8e, 0xa5, 0xd0, 0x0c, 0xb5,
0x61, 0x39, 0x73, 0x80, 0x8f, 0x25, 0x4f, 0x63, 0xe7, 0x2e, 0x0b, 0x78, 0x74, 0x6b, 0x06, 0x2f,
0x0d, 0x3d, 0x90, 0x90, 0x3e, 0x83, 0x92, 0x66, 0x6a, 0xc0, 0x14, 0x89, 0xb8, 0x36, 0x89, 0x07,
0x56, 0xf3, 0x7c, 0x6d, 0xb7, 0x7d, 0xca, 0x9d, 0x87, 0x40, 0x0f, 0x57, 0x07, 0x9b, 0xb0, 0x71,
0xcb, 0x01, 0x9e, 0xd3, 0x5b, 0xe0, 0x5b, 0x00, 0x1b, 0x93, 0xa5, 0xa0, 0x27, 0xb0, 0x9a, 0x4f,
0x56, 0x24, 0x64, 0x11, 0xbb, 0xa0, 0x26, 0xb5, 0x45, 0x25, 0xca, 0x92, 0xd4, 0x51, 0xb2, 0x87,
0xde, 0xc1, 0x66, 0xde, 0xb2, 0x44, 0xb1, 0x58, 0x2a, 0x43, 0xb8, 0x30, 0x4c, 0x0d, 0x68, 0x94,
0xc8, 0xaf, 0xe4, 0xe5, 0xa7, 0x43, 0x0c, 0xaf, 0xe7, 0xdc, 0x8b, 0x5d, 0xde, 0x49, 0x92, 0x56,
0xfb, 0x12, 0x00, 0x64, 0xc7, 0x44, 0xbb, 0x76, 0x62, 0xd9, 0x95, 0x9d, 0x58, 0x85, 0x7a, 0xa9,
0x89, 0x46, 0xfb, 0x81, 0x53, 0x08, 0x7a, 0x0d, 0x2b, 0xec, 0x53, 0xcc, 0x7d, 0x95, 0x4c, 0x4a,
0x61, 0x8a, 0x14, 0x94, 0x25, 0x0c, 0x35, 0xfc, 0x0c, 0x60, 0xde, 0x53, 0xa3, 0x2d, 0x00, 0x1e,
0x13, 0x1a, 0x86, 0x8a, 0x69, 0x3f, 0x34, 0xcb, 0xb8, 0xc8, 0xe3, 0x57, 0x3e, 0x60, 0xe7, 0x87,
0x55, 0x9f, 0x4c, 0x4d, 0xf7, 0x6d, 0xed, 0x7c, 0xe3, 0x2e, 0x8c, 0xbc, 0x62, 0xc2, 0x69, 0x28,
0xe2, 0xe5, 0x5c, 0x2b, 0xcf, 0x6c, 0x1c, 0xed, 0xc3, 0xea, 0x14, 0xdb, 0x2e, 0xe2, 0x95, 0x70,
0x8c, 0x45, 0x9f, 0xc2, 0xda, 0x34, 0x2b, 0x2e, 0xe2, 0x4a, 0x38, 0xc6, 0x76, 0xcd, 0x0e, 0x94,
0x73, 0xf7, 0xaf, 0x10, 0x86, 0x52, 0xf2, 0x6d, 0xc3, 0xe8, 0xdf, 0x7c, 0x83, 0x46, 0x87, 0xe5,
0xc6, 0x7f, 0x13, 0xf7, 0xfd, 0x43, 0xaa, 0x07, 0x8f, 0x82, 0xce, 0xbc, 0xfb, 0x7d, 0xed, 0xff,
0x0a, 0x00, 0x00, 0xff, 0xff, 0x64, 0xbf, 0xda, 0x5e, 0xce, 0x06, 0x00, 0x00,
}
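
Editor's note: the generated Server type above carries the drop_for_* flags described in its field comments. As a rough illustration of how a client might honor those semantics, here is a minimal sketch that is not part of this commit; the import path for the generated package and the literal address are assumptions.

package main

import (
	"fmt"
	"net"

	// Import path assumed for the generated grpc.lb.v1 package.
	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1"
)

// pickServer applies the drop semantics from the Server message comments:
// if either drop flag is set, the request is dropped client-side; otherwise
// the resolved ip_address/port pair is used as the dial target.
func pickServer(s *lbpb.Server) (addr string, drop bool) {
	if s.GetDropForRateLimiting() || s.GetDropForLoadBalancing() {
		return "", true
	}
	return fmt.Sprintf("%s:%d", net.IP(s.GetIpAddress()).String(), s.GetPort()), false
}

func main() {
	srv := &lbpb.Server{IpAddress: net.ParseIP("10.0.0.1").To4(), Port: 443}
	if addr, drop := pickServer(srv); !drop {
		fmt.Println("send RPC to", addr)
	}
}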

View File

@ -0,0 +1,164 @@
// Copyright 2016 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package grpc.lb.v1;
message Duration {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive.
int64 seconds = 1;
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
int32 nanos = 2;
}
message Timestamp {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
int64 seconds = 1;
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
int32 nanos = 2;
}
service LoadBalancer {
// Bidirectional rpc to get a list of servers.
rpc BalanceLoad(stream LoadBalanceRequest)
returns (stream LoadBalanceResponse);
}
message LoadBalanceRequest {
oneof load_balance_request_type {
// This message should be sent on the first request to the load balancer.
InitialLoadBalanceRequest initial_request = 1;
// The client stats should be periodically reported to the load balancer
// based on the duration defined in the InitialLoadBalanceResponse.
ClientStats client_stats = 2;
}
}
message InitialLoadBalanceRequest {
// Name of load balanced service (IE, balancer.service.com)
// length should be less than 256 bytes.
string name = 1;
}
// Contains client level statistics that are useful to load balancing. Each
// count except the timestamp should be reset to zero after reporting the stats.
message ClientStats {
// The timestamp of generating the report.
Timestamp timestamp = 1;
// The total number of RPCs that started.
int64 num_calls_started = 2;
// The total number of RPCs that finished.
int64 num_calls_finished = 3;
// The total number of RPCs that were dropped by the client because of rate
// limiting.
int64 num_calls_finished_with_drop_for_rate_limiting = 4;
// The total number of RPCs that were dropped by the client because of load
// balancing.
int64 num_calls_finished_with_drop_for_load_balancing = 5;
// The total number of RPCs that failed to reach a server except dropped RPCs.
int64 num_calls_finished_with_client_failed_to_send = 6;
// The total number of RPCs that finished and are known to have been received
// by a server.
int64 num_calls_finished_known_received = 7;
}
message LoadBalanceResponse {
oneof load_balance_response_type {
// This message should be sent on the first response to the client.
InitialLoadBalanceResponse initial_response = 1;
// Contains the list of servers selected by the load balancer. The client
// should send requests to these servers in the specified order.
ServerList server_list = 2;
}
}
message InitialLoadBalanceResponse {
// This is an application layer redirect that indicates the client should use
// the specified server for load balancing. When this field is non-empty in
// the response, the client should open a separate connection to the
// load_balancer_delegate and call the BalanceLoad method. Its length should
// be less than 64 bytes.
string load_balancer_delegate = 1;
// This interval defines how often the client should send the client stats
// to the load balancer. Stats should only be reported when the duration is
// positive.
Duration client_stats_report_interval = 2;
}
message ServerList {
// Contains a list of servers selected by the load balancer. The list will
// be updated when server resolutions change or as needed to balance load
// across more servers. The client should consume the server list in order
// unless instructed otherwise via the client_config.
repeated Server servers = 1;
// Indicates the amount of time that the client should consider this server
// list as valid. It may be considered stale after waiting this interval of
// time after receiving the list. If the interval is not positive, the
// client can assume the list is valid until the next list is received.
Duration expiration_interval = 3;
}
// Contains server information. When none of the [drop_for_*] fields are true,
// use the other fields. When drop_for_rate_limiting is true, ignore all other
// fields. Use drop_for_load_balancing only when it is true and
// drop_for_rate_limiting is false.
message Server {
// A resolved address for the server, serialized in network-byte-order. It may
// either be an IPv4 or IPv6 address.
bytes ip_address = 1;
// A resolved port number for the server.
int32 port = 2;
// An opaque but printable token given to the frontend for each pick. All
// frontend requests for that pick must include the token in its initial
// metadata. The token is used by the backend to verify the request and to
// allow the backend to report load to the gRPC LB system.
//
// Its length is variable but less than 50 bytes.
string load_balance_token = 3;
// Indicates whether this particular request should be dropped by the client
// for rate limiting.
bool drop_for_rate_limiting = 4;
// Indicates whether this particular request should be dropped by the client
// for load balancing.
bool drop_for_load_balancing = 5;
}

123
vendor/google.golang.org/grpc/grpclog/grpclog.go generated vendored Normal file
View File

@ -0,0 +1,123 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package grpclog defines logging for grpc.
//
// All logs in the transport package go to verbose level 2 only.
// All logs in other grpc packages are logged regardless of the verbosity level.
//
// In the default logger,
// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL,
// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
package grpclog // import "google.golang.org/grpc/grpclog"
import "os"
var logger = newLoggerV2()
// V reports whether verbosity level l is at least the requested verbose level.
func V(l int) bool {
return logger.V(l)
}
// Info logs to the INFO log.
func Info(args ...interface{}) {
logger.Info(args...)
}
// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
func Infof(format string, args ...interface{}) {
logger.Infof(format, args...)
}
// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
func Infoln(args ...interface{}) {
logger.Infoln(args...)
}
// Warning logs to the WARNING log.
func Warning(args ...interface{}) {
logger.Warning(args...)
}
// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
func Warningf(format string, args ...interface{}) {
logger.Warningf(format, args...)
}
// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
func Warningln(args ...interface{}) {
logger.Warningln(args...)
}
// Error logs to the ERROR log.
func Error(args ...interface{}) {
logger.Error(args...)
}
// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
func Errorf(format string, args ...interface{}) {
logger.Errorf(format, args...)
}
// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
func Errorln(args ...interface{}) {
logger.Errorln(args...)
}
// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
// It calls os.Exit() with exit code 1.
func Fatal(args ...interface{}) {
logger.Fatal(args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
// It calls os.Exit() with exit code 1.
func Fatalf(format string, args ...interface{}) {
logger.Fatalf(format, args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
// It calls os.Exit() with exit code 1.
func Fatalln(args ...interface{}) {
logger.Fatalln(args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
// Deprecated: use Info.
func Print(args ...interface{}) {
logger.Info(args...)
}
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
// Deprecated: use Infof.
func Printf(format string, args ...interface{}) {
logger.Infof(format, args...)
}
// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
// Deprecated: use Infoln.
func Println(args ...interface{}) {
logger.Infoln(args...)
}
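
Editor's note: a hedged usage sketch of the package-level helpers above. They log through the default LoggerV2, whose severity and verbosity come from the environment variables named in the package comment; the message text and pid below are placeholders.

package main

import "google.golang.org/grpc/grpclog"

func main() {
	// With the default logger, only ERROR is printed unless
	// GRPC_GO_LOG_SEVERITY_LEVEL=info (or warning) is set, and V(2)
	// is false unless GRPC_GO_LOG_VERBOSITY_LEVEL=2 (or higher) is set.
	grpclog.Infof("starting up, pid=%d", 1234)
	if grpclog.V(2) {
		grpclog.Infoln("transport-level verbosity enabled")
	}
	grpclog.Warning("this goes to the WARNING log")
}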

View File

@ -1,52 +1,25 @@
/* /*
* *
* Copyright 2015, Google Inc. * Copyright 2015 gRPC authors.
* All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Licensed under the Apache License, Version 2.0 (the "License");
* modification, are permitted provided that the following conditions are * you may not use this file except in compliance with the License.
* met: * You may obtain a copy of the License at
* *
* * Redistributions of source code must retain the above copyright * http://www.apache.org/licenses/LICENSE-2.0
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * Unless required by applicable law or agreed to in writing, software
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * distributed under the License is distributed on an "AS IS" BASIS,
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * See the License for the specific language governing permissions and
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * limitations under the License.
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* *
*/ */
/* package grpclog
Package grpclog defines logging for grpc.
*/
package grpclog // import "google.golang.org/grpc/grpclog"
import (
"log"
"os"
)
// Use golang's standard logger by default.
// Access is not mutex-protected: do not modify except in init()
// functions.
var logger Logger = log.New(os.Stderr, "", log.LstdFlags)
// Logger mimics golang's standard Logger as an interface. // Logger mimics golang's standard Logger as an interface.
// Deprecated: use LoggerV2.
type Logger interface { type Logger interface {
Fatal(args ...interface{}) Fatal(args ...interface{})
Fatalf(format string, args ...interface{}) Fatalf(format string, args ...interface{})
@ -58,36 +31,53 @@ type Logger interface {
// SetLogger sets the logger that is used in grpc. Call only from // SetLogger sets the logger that is used in grpc. Call only from
// init() functions. // init() functions.
// Deprecated: use SetLoggerV2.
func SetLogger(l Logger) { func SetLogger(l Logger) {
logger = l logger = &loggerWrapper{Logger: l}
} }
// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code. // loggerWrapper wraps Logger into a LoggerV2.
func Fatal(args ...interface{}) { type loggerWrapper struct {
logger.Fatal(args...) Logger
} }
// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code. func (g *loggerWrapper) Info(args ...interface{}) {
func Fatalf(format string, args ...interface{}) { g.Logger.Print(args...)
logger.Fatalf(format, args...)
} }
// Fatalln is equivalent to Println() followed by a call to os.Exit()) with a non-zero exit code. func (g *loggerWrapper) Infoln(args ...interface{}) {
func Fatalln(args ...interface{}) { g.Logger.Println(args...)
logger.Fatalln(args...)
} }
// Print prints to the logger. Arguments are handled in the manner of fmt.Print. func (g *loggerWrapper) Infof(format string, args ...interface{}) {
func Print(args ...interface{}) { g.Logger.Printf(format, args...)
logger.Print(args...)
} }
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. func (g *loggerWrapper) Warning(args ...interface{}) {
func Printf(format string, args ...interface{}) { g.Logger.Print(args...)
logger.Printf(format, args...)
} }
// Println prints to the logger. Arguments are handled in the manner of fmt.Println. func (g *loggerWrapper) Warningln(args ...interface{}) {
func Println(args ...interface{}) { g.Logger.Println(args...)
logger.Println(args...) }
func (g *loggerWrapper) Warningf(format string, args ...interface{}) {
g.Logger.Printf(format, args...)
}
func (g *loggerWrapper) Error(args ...interface{}) {
g.Logger.Print(args...)
}
func (g *loggerWrapper) Errorln(args ...interface{}) {
g.Logger.Println(args...)
}
func (g *loggerWrapper) Errorf(format string, args ...interface{}) {
g.Logger.Printf(format, args...)
}
func (g *loggerWrapper) V(l int) bool {
// Returns true for all verbose levels.
return true
} }

195
vendor/google.golang.org/grpc/grpclog/loggerv2.go generated vendored Normal file
View File

@ -0,0 +1,195 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpclog
import (
"io"
"io/ioutil"
"log"
"os"
"strconv"
)
// LoggerV2 does underlying logging work for grpclog.
type LoggerV2 interface {
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
Info(args ...interface{})
// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
Infoln(args ...interface{})
// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
Infof(format string, args ...interface{})
// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
Warning(args ...interface{})
// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
Warningln(args ...interface{})
// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
Warningf(format string, args ...interface{})
// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
Error(args ...interface{})
// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
Errorln(args ...interface{})
// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
Errorf(format string, args ...interface{})
// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatal(args ...interface{})
// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatalln(args ...interface{})
// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatalf(format string, args ...interface{})
// V reports whether verbosity level l is at least the requested verbose level.
V(l int) bool
}
// SetLoggerV2 sets logger that is used in grpc to a V2 logger.
// Not mutex-protected, should be called before any gRPC functions.
func SetLoggerV2(l LoggerV2) {
logger = l
}
const (
// infoLog indicates Info severity.
infoLog int = iota
// warningLog indicates Warning severity.
warningLog
// errorLog indicates Error severity.
errorLog
// fatalLog indicates Fatal severity.
fatalLog
)
// severityName contains the string representation of each severity.
var severityName = []string{
infoLog: "INFO",
warningLog: "WARNING",
errorLog: "ERROR",
fatalLog: "FATAL",
}
// loggerT is the default logger used by grpclog.
type loggerT struct {
m []*log.Logger
v int
}
// NewLoggerV2 creates a loggerV2 with the provided writers.
// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1).
// Error logs will be written to errorW, warningW and infoW.
// Warning logs will be written to warningW and infoW.
// Info logs will be written to infoW.
func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0)
}
// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
// verbosity level.
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
var m []*log.Logger
m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags))
m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags))
ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags))
m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags))
return &loggerT{m: m, v: v}
}
// newLoggerV2 creates a loggerV2 to be used as default logger.
// All logs are written to stderr.
func newLoggerV2() LoggerV2 {
errorW := ioutil.Discard
warningW := ioutil.Discard
infoW := ioutil.Discard
logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
switch logLevel {
case "", "ERROR", "error": // If env is unset, set level to ERROR.
errorW = os.Stderr
case "WARNING", "warning":
warningW = os.Stderr
case "INFO", "info":
infoW = os.Stderr
}
var v int
vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL")
if vl, err := strconv.Atoi(vLevel); err == nil {
v = vl
}
return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v)
}
func (g *loggerT) Info(args ...interface{}) {
g.m[infoLog].Print(args...)
}
func (g *loggerT) Infoln(args ...interface{}) {
g.m[infoLog].Println(args...)
}
func (g *loggerT) Infof(format string, args ...interface{}) {
g.m[infoLog].Printf(format, args...)
}
func (g *loggerT) Warning(args ...interface{}) {
g.m[warningLog].Print(args...)
}
func (g *loggerT) Warningln(args ...interface{}) {
g.m[warningLog].Println(args...)
}
func (g *loggerT) Warningf(format string, args ...interface{}) {
g.m[warningLog].Printf(format, args...)
}
func (g *loggerT) Error(args ...interface{}) {
g.m[errorLog].Print(args...)
}
func (g *loggerT) Errorln(args ...interface{}) {
g.m[errorLog].Println(args...)
}
func (g *loggerT) Errorf(format string, args ...interface{}) {
g.m[errorLog].Printf(format, args...)
}
func (g *loggerT) Fatal(args ...interface{}) {
g.m[fatalLog].Fatal(args...)
// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
}
func (g *loggerT) Fatalln(args ...interface{}) {
g.m[fatalLog].Fatalln(args...)
// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
}
func (g *loggerT) Fatalf(format string, args ...interface{}) {
g.m[fatalLog].Fatalf(format, args...)
// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
}
func (g *loggerT) V(l int) bool {
return l <= g.v
}
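
Editor's note: for completeness, a small sketch of installing a custom LoggerV2 built from the constructors above. Per SetLoggerV2's comment it must run before any other gRPC call; an init() function is one convenient place for that.

package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Send every severity to stderr and enable verbosity level 2.
	// SetLoggerV2 is not mutex-protected, so do this before any gRPC call.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 2))
}

func main() {
	grpclog.Info("custom LoggerV2 installed")
}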

View File

@ -0,0 +1,176 @@
// Code generated by protoc-gen-go.
// source: health.proto
// DO NOT EDIT!
/*
Package grpc_health_v1 is a generated protocol buffer package.
It is generated from these files:
health.proto
It has these top-level messages:
HealthCheckRequest
HealthCheckResponse
*/
package grpc_health_v1
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type HealthCheckResponse_ServingStatus int32
const (
HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0
HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1
HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2
)
var HealthCheckResponse_ServingStatus_name = map[int32]string{
0: "UNKNOWN",
1: "SERVING",
2: "NOT_SERVING",
}
var HealthCheckResponse_ServingStatus_value = map[string]int32{
"UNKNOWN": 0,
"SERVING": 1,
"NOT_SERVING": 2,
}
func (x HealthCheckResponse_ServingStatus) String() string {
return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x))
}
func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
return fileDescriptor0, []int{1, 0}
}
type HealthCheckRequest struct {
Service string `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"`
}
func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} }
func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) }
func (*HealthCheckRequest) ProtoMessage() {}
func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type HealthCheckResponse struct {
Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
}
func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} }
func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) }
func (*HealthCheckResponse) ProtoMessage() {}
func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func init() {
proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest")
proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse")
proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Health service
type HealthClient interface {
Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
}
type healthClient struct {
cc *grpc.ClientConn
}
func NewHealthClient(cc *grpc.ClientConn) HealthClient {
return &healthClient{cc}
}
func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
out := new(HealthCheckResponse)
err := grpc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Health service
type HealthServer interface {
Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
}
func RegisterHealthServer(s *grpc.Server, srv HealthServer) {
s.RegisterService(&_Health_serviceDesc, srv)
}
func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(HealthCheckRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HealthServer).Check(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/grpc.health.v1.Health/Check",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Health_serviceDesc = grpc.ServiceDesc{
ServiceName: "grpc.health.v1.Health",
HandlerType: (*HealthServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Check",
Handler: _Health_Check_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "health.proto",
}
func init() { proto.RegisterFile("health.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 204 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xc9, 0x48, 0x4d, 0xcc,
0x29, 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4b, 0x2f, 0x2a, 0x48, 0xd6, 0x83,
0x0a, 0x95, 0x19, 0x2a, 0xe9, 0x71, 0x09, 0x79, 0x80, 0x39, 0xce, 0x19, 0xa9, 0xc9, 0xd9, 0x41,
0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0x45, 0x65, 0x99, 0xc9,
0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x30, 0xae, 0xd2, 0x1c, 0x46, 0x2e, 0x61, 0x14,
0x0d, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x42, 0x9e, 0x5c, 0x6c, 0xc5, 0x25, 0x89, 0x25, 0xa5,
0xc5, 0x60, 0x0d, 0x7c, 0x46, 0x86, 0x7a, 0xa8, 0x16, 0xe9, 0x61, 0xd1, 0xa4, 0x17, 0x0c, 0x32,
0x34, 0x2f, 0x3d, 0x18, 0xac, 0x31, 0x08, 0x6a, 0x80, 0x92, 0x15, 0x17, 0x2f, 0x8a, 0x84, 0x10,
0x37, 0x17, 0x7b, 0xa8, 0x9f, 0xb7, 0x9f, 0x7f, 0xb8, 0x9f, 0x00, 0x03, 0x88, 0x13, 0xec, 0x1a,
0x14, 0xe6, 0xe9, 0xe7, 0x2e, 0xc0, 0x28, 0xc4, 0xcf, 0xc5, 0xed, 0xe7, 0x1f, 0x12, 0x0f, 0x13,
0x60, 0x32, 0x8a, 0xe2, 0x62, 0x83, 0x58, 0x24, 0x14, 0xc0, 0xc5, 0x0a, 0xb6, 0x4c, 0x48, 0x09,
0xaf, 0x4b, 0xc0, 0xfe, 0x95, 0x52, 0x26, 0xc2, 0xb5, 0x49, 0x6c, 0xe0, 0x10, 0x34, 0x06, 0x04,
0x00, 0x00, 0xff, 0xff, 0xac, 0x56, 0x2a, 0xcb, 0x51, 0x01, 0x00, 0x00,
}
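
Editor's note: a hedged sketch of the client side of the Health service generated above. The target address and service name are placeholders, and grpc.Dial/grpc.WithInsecure are assumed from the core grpc package.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure()) // placeholder address
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// The Service name is a placeholder; an empty string asks about the
	// server's overall health instead.
	resp, err := healthpb.NewHealthClient(conn).Check(context.Background(),
		&healthpb.HealthCheckRequest{Service: "my.package.MyService"})
	if err != nil {
		log.Fatalf("health check failed: %v", err)
	}
	fmt.Println("serving status:", resp.Status)
}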

View File

@ -0,0 +1,34 @@
// Copyright 2017 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package grpc.health.v1;
message HealthCheckRequest {
string service = 1;
}
message HealthCheckResponse {
enum ServingStatus {
UNKNOWN = 0;
SERVING = 1;
NOT_SERVING = 2;
}
ServingStatus status = 1;
}
service Health{
rpc Check(HealthCheckRequest) returns (HealthCheckResponse);
}

70
vendor/google.golang.org/grpc/health/health.go generated vendored Normal file
View File

@ -0,0 +1,70 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package health provides some utility functions to health-check a server. The implementation
// is based on protobuf. Users need to write their own implementations if other IDLs are used.
package health
import (
"sync"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
)
// Server implements `service Health`.
type Server struct {
mu sync.Mutex
// statusMap stores the serving status of the services this Server monitors.
statusMap map[string]healthpb.HealthCheckResponse_ServingStatus
}
// NewServer returns a new Server.
func NewServer() *Server {
return &Server{
statusMap: make(map[string]healthpb.HealthCheckResponse_ServingStatus),
}
}
// Check implements `service Health`.
func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
if in.Service == "" {
// check the server overall health status.
return &healthpb.HealthCheckResponse{
Status: healthpb.HealthCheckResponse_SERVING,
}, nil
}
if status, ok := s.statusMap[in.Service]; ok {
return &healthpb.HealthCheckResponse{
Status: status,
}, nil
}
return nil, grpc.Errorf(codes.NotFound, "unknown service")
}
// SetServingStatus is called when the serving status of a service needs to be
// reset, or when a new service entry must be inserted into the statusMap.
func (s *Server) SetServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) {
s.mu.Lock()
s.statusMap[service] = status
s.mu.Unlock()
}
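
Editor's note: the matching server-side wiring, sketched with a placeholder listen address and service name; grpc.NewServer and net.Listen are the usual gRPC/Go APIs.

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	lis, err := net.Listen("tcp", ":50051") // placeholder port
	if err != nil {
		log.Fatalf("listen: %v", err)
	}

	s := grpc.NewServer()
	hs := health.NewServer()
	healthpb.RegisterHealthServer(s, hs)

	// "my.package.MyService" is a placeholder; the empty string would
	// address the server's overall status instead.
	hs.SetServingStatus("my.package.MyService", healthpb.HealthCheckResponse_SERVING)

	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}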

View File

@ -1,33 +1,18 @@
/* /*
* *
* Copyright 2016, Google Inc. * Copyright 2016 gRPC authors.
* All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Licensed under the Apache License, Version 2.0 (the "License");
* modification, are permitted provided that the following conditions are * you may not use this file except in compliance with the License.
* met: * You may obtain a copy of the License at
* *
* * Redistributions of source code must retain the above copyright * http://www.apache.org/licenses/LICENSE-2.0
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * Unless required by applicable law or agreed to in writing, software
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * distributed under the License is distributed on an "AS IS" BASIS,
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * See the License for the specific language governing permissions and
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * limitations under the License.
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* *
*/ */
@ -40,17 +25,17 @@ import (
// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error
// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. inovker is the handler to complete the RPC // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC
// and it is the responsibility of the interceptor to call it. // and it is the responsibility of the interceptor to call it.
// This is the EXPERIMENTAL API. // This is an EXPERIMENTAL API.
type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
// Streamer is called by StreamClientInterceptor to create a ClientStream. // Streamer is called by StreamClientInterceptor to create a ClientStream.
type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O // StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O
// operations. streamer is the handlder to create a ClientStream and it is the responsibility of the interceptor to call it. // operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it.
// This is the EXPERIMENTAL API. // This is an EXPERIMENTAL API.
type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)
// UnaryServerInfo consists of various information about a unary RPC on // UnaryServerInfo consists of various information about a unary RPC on

View File

@ -1,32 +1,17 @@
/* /*
* Copyright 2016, Google Inc. * Copyright 2016 gRPC authors.
* All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Licensed under the Apache License, Version 2.0 (the "License");
* modification, are permitted provided that the following conditions are * you may not use this file except in compliance with the License.
* met: * You may obtain a copy of the License at
* *
* * Redistributions of source code must retain the above copyright * http://www.apache.org/licenses/LICENSE-2.0
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * Unless required by applicable law or agreed to in writing, software
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * distributed under the License is distributed on an "AS IS" BASIS,
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * See the License for the specific language governing permissions and
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * limitations under the License.
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* *
*/ */

65
vendor/google.golang.org/grpc/keepalive/keepalive.go generated vendored Normal file
View File

@ -0,0 +1,65 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package keepalive defines configurable parameters for point-to-point healthcheck.
package keepalive
import (
"time"
)
// ClientParameters is used to set keepalive parameters on the client-side.
// These configure how the client will actively probe to notice when a connection is broken
// and send pings so intermediaries will be aware of the liveness of the connection.
// Make sure these parameters are set in coordination with the keepalive policy on the server,
// as incompatible settings can result in the connection being closed.
type ClientParameters struct {
// After a duration of this time if the client doesn't see any activity it pings the server to see if the transport is still alive.
Time time.Duration // The current default value is infinity.
// After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that
// the connection is closed.
Timeout time.Duration // The current default value is 20 seconds.
// If true, client runs keepalive checks even with no active RPCs.
PermitWithoutStream bool // false by default.
}
// ServerParameters is used to set keepalive and max-age parameters on the server-side.
type ServerParameters struct {
// MaxConnectionIdle is a duration for the amount of time after which an idle connection would be closed by sending a GoAway.
// Idleness duration is defined since the most recent time the number of outstanding RPCs became zero or the connection establishment.
MaxConnectionIdle time.Duration // The current default value is infinity.
// MaxConnectionAge is a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway.
// A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms.
MaxConnectionAge time.Duration // The current default value is infinity.
// MaxConnectionAgeGrace is an additive period after MaxConnectionAge after which the connection will be forcibly closed.
MaxConnectionAgeGrace time.Duration // The current default value is infinity.
// After a duration of this time if the server doesn't see any activity it pings the client to see if the transport is still alive.
Time time.Duration // The current default value is 2 hours.
// After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that
// the connection is closed.
Timeout time.Duration // The current default value is 20 seconds.
}
// EnforcementPolicy is used to set keepalive enforcement policy on the server-side.
// Server will close connection with a client that violates this policy.
type EnforcementPolicy struct {
// MinTime is the minimum amount of time a client should wait before sending a keepalive ping.
MinTime time.Duration // The current default value is 5 minutes.
// If true, server expects keepalive pings even when there are no active streams(RPCs).
PermitWithoutStream bool // false by default.
}
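
Editor's note: a hedged sketch of wiring these parameter structs into a client and a server, assuming the vendored grpc version exposes the corresponding options (grpc.WithKeepaliveParams, grpc.KeepaliveParams, grpc.KeepaliveEnforcementPolicy); the durations and address are placeholders.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Client: ping after 30s of inactivity, give up 10s later. Keep these
	// compatible with the server's EnforcementPolicy or the server may
	// close the connection.
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure(),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                30 * time.Second,
			Timeout:             10 * time.Second,
			PermitWithoutStream: true,
		}))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// Server: close idle connections after 5 minutes and require clients
	// to wait at least 1 minute between keepalive pings.
	_ = grpc.NewServer(
		grpc.KeepaliveParams(keepalive.ServerParameters{MaxConnectionIdle: 5 * time.Minute}),
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{MinTime: time.Minute}),
	)
}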

View File

@ -1,91 +1,53 @@
/* /*
* *
* Copyright 2014, Google Inc. * Copyright 2014 gRPC authors.
* All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Licensed under the Apache License, Version 2.0 (the "License");
* modification, are permitted provided that the following conditions are * you may not use this file except in compliance with the License.
* met: * You may obtain a copy of the License at
* *
* * Redistributions of source code must retain the above copyright * http://www.apache.org/licenses/LICENSE-2.0
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * Unless required by applicable law or agreed to in writing, software
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * distributed under the License is distributed on an "AS IS" BASIS,
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * See the License for the specific language governing permissions and
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * limitations under the License.
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* *
*/ */
// Package metadata define the structure of the metadata supported by gRPC library. // Package metadata define the structure of the metadata supported by gRPC library.
// Please refer to https://grpc.io/docs/guides/wire.html for more information about custom-metadata.
package metadata // import "google.golang.org/grpc/metadata" package metadata // import "google.golang.org/grpc/metadata"
import ( import (
"encoding/base64"
"fmt" "fmt"
"strings" "strings"
"golang.org/x/net/context" "golang.org/x/net/context"
) )
const ( // DecodeKeyValue returns k, v, nil. It is deprecated and should not be used.
binHdrSuffix = "-bin"
)
// encodeKeyValue encodes key and value qualified for transmission via gRPC.
// Transmitting binary headers violates HTTP/2 spec.
// TODO(zhaoq): Maybe check if k is ASCII also.
func encodeKeyValue(k, v string) (string, string) {
k = strings.ToLower(k)
if strings.HasSuffix(k, binHdrSuffix) {
val := base64.StdEncoding.EncodeToString([]byte(v))
v = string(val)
}
return k, v
}
// DecodeKeyValue returns the original key and value corresponding to the
// encoded data in k, v.
// If k is a binary header and v contains comma, v is split on comma before decoded,
// and the decoded v will be joined with comma before returned.
func DecodeKeyValue(k, v string) (string, string, error) { func DecodeKeyValue(k, v string) (string, string, error) {
if !strings.HasSuffix(k, binHdrSuffix) {
return k, v, nil return k, v, nil
}
vvs := strings.Split(v, ",")
for i, vv := range vvs {
val, err := base64.StdEncoding.DecodeString(vv)
if err != nil {
return "", "", err
}
vvs[i] = string(val)
}
return k, strings.Join(vvs, ","), nil
} }
// MD is a mapping from metadata keys to values. Users should use the following // MD is a mapping from metadata keys to values. Users should use the following
// two convenience functions New and Pairs to generate MD. // two convenience functions New and Pairs to generate MD.
type MD map[string][]string type MD map[string][]string
// New creates a MD from given key-value map. // New creates an MD from a given key-value map.
//
// Only the following ASCII characters are allowed in keys:
// - digits: 0-9
// - uppercase letters: A-Z (normalized to lower)
// - lowercase letters: a-z
// - special characters: -_.
// Uppercase letters are automatically converted to lowercase.
func New(m map[string]string) MD { func New(m map[string]string) MD {
md := MD{} md := MD{}
for k, v := range m { for k, val := range m {
key, val := encodeKeyValue(k, v) key := strings.ToLower(k)
md[key] = append(md[key], val) md[key] = append(md[key], val)
} }
return md return md
@ -93,19 +55,25 @@ func New(m map[string]string) MD {
// Pairs returns an MD formed by the mapping of key, value ... // Pairs returns an MD formed by the mapping of key, value ...
// Pairs panics if len(kv) is odd. // Pairs panics if len(kv) is odd.
//
// Only the following ASCII characters are allowed in keys:
// - digits: 0-9
// - uppercase letters: A-Z (normalized to lower)
// - lowercase letters: a-z
// - special characters: -_.
// Uppercase letters are automatically converted to lowercase.
func Pairs(kv ...string) MD { func Pairs(kv ...string) MD {
if len(kv)%2 == 1 { if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
} }
md := MD{} md := MD{}
var k string var key string
for i, s := range kv { for i, s := range kv {
if i%2 == 0 { if i%2 == 0 {
k = s key = strings.ToLower(s)
continue continue
} }
key, val := encodeKeyValue(k, s) md[key] = append(md[key], s)
md[key] = append(md[key], val)
} }
return md return md
} }
@ -120,9 +88,9 @@ func (md MD) Copy() MD {
return Join(md) return Join(md)
} }
// Join joins any number of MDs into a single MD. // Join joins any number of mds into a single MD.
// The order of values for each key is determined by the order in which // The order of values for each key is determined by the order in which
// the MDs containing those values are presented to Join. // the mds containing those values are presented to Join.
func Join(mds ...MD) MD { func Join(mds ...MD) MD {
out := MD{} out := MD{}
for _, md := range mds { for _, md := range mds {
@ -133,17 +101,41 @@ func Join(mds ...MD) MD {
return out return out
} }
type mdKey struct{} type mdIncomingKey struct{}
type mdOutgoingKey struct{}
// NewContext creates a new context with md attached. // NewContext is a wrapper for NewOutgoingContext(ctx, md). Deprecated.
func NewContext(ctx context.Context, md MD) context.Context { func NewContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdKey{}, md) return NewOutgoingContext(ctx, md)
} }
// FromContext returns the MD in ctx if it exists. // NewIncomingContext creates a new context with incoming md attached.
// The returned md should be immutable, writing to it may cause races. func NewIncomingContext(ctx context.Context, md MD) context.Context {
// Modification should be made to the copies of the returned md. return context.WithValue(ctx, mdIncomingKey{}, md)
}
// NewOutgoingContext creates a new context with outgoing md attached.
func NewOutgoingContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdOutgoingKey{}, md)
}
// FromContext is a wrapper for FromIncomingContext(ctx). Deprecated.
func FromContext(ctx context.Context) (md MD, ok bool) { func FromContext(ctx context.Context) (md MD, ok bool) {
md, ok = ctx.Value(mdKey{}).(MD) return FromIncomingContext(ctx)
}
// FromIncomingContext returns the incoming metadata in ctx if it exists. The
// returned MD should not be modified. Writing to it may cause races.
// Modification should be made to copies of the returned MD.
func FromIncomingContext(ctx context.Context) (md MD, ok bool) {
md, ok = ctx.Value(mdIncomingKey{}).(MD)
return
}
// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The
// returned MD should not be modified. Writing to it may cause races.
// Modification should be made to the copies of the returned MD.
func FromOutgoingContext(ctx context.Context) (md MD, ok bool) {
md, ok = ctx.Value(mdOutgoingKey{}).(MD)
return return
} }
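
Editor's note: to make the new incoming/outgoing split concrete, a minimal sketch; the keys and values are placeholders, and in a real program the incoming context is supplied by the gRPC transport rather than built by hand.

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func main() {
	// Client side: attach outgoing metadata to the context used for an RPC;
	// the transport sends it as request headers.
	md := metadata.Pairs("x-request-id", "abc123")
	ctx := metadata.NewOutgoingContext(context.Background(), md)
	out, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println("outgoing:", out["x-request-id"]) // keys are normalized to lowercase

	// Server side: a handler reads incoming metadata from its own context,
	// which the transport populates; simulated here purely for illustration.
	in, _ := metadata.FromIncomingContext(metadata.NewIncomingContext(context.Background(), md))
	fmt.Println("incoming:", in["x-request-id"])
}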

292
vendor/google.golang.org/grpc/naming/dns_resolver.go generated vendored Normal file
View File

@ -0,0 +1,292 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package naming
import (
"errors"
"fmt"
"net"
"strconv"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc/grpclog"
)
const (
defaultPort = "443"
defaultFreq = time.Minute * 30
)
var (
errMissingAddr = errors.New("missing address")
errWatcherClose = errors.New("watcher has been closed")
)
// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and
// create watchers that poll the DNS server using the frequency set by freq.
func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) {
return &dnsResolver{freq: freq}, nil
}
// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create
// watchers that poll the DNS server using the default frequency defined by defaultFreq.
func NewDNSResolver() (Resolver, error) {
return NewDNSResolverWithFreq(defaultFreq)
}
// dnsResolver handles name resolution for names following the DNS scheme
type dnsResolver struct {
// frequency of polling the DNS server that the watchers created by this resolver will use.
freq time.Duration
}
// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
// If addr is an IPv4 address, return the addr and ok = true.
// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
func formatIP(addr string) (addrIP string, ok bool) {
ip := net.ParseIP(addr)
if ip == nil {
return "", false
}
if ip.To4() != nil {
return addr, true
}
return "[" + addr + "]", true
}
// parseTarget takes the user input target string, returns formatted host and port info.
// If target doesn't specify a port, the port is set to defaultPort.
// If target is in IPv6 format and the host name is enclosed in square brackets, the
// brackets are stripped when setting the host.
// examples:
// target: "www.google.com" returns host: "www.google.com", port: "443"
// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
// target: ":80" returns host: "localhost", port: "80"
// target: ":" returns host: "localhost", port: "443"
func parseTarget(target string) (host, port string, err error) {
if target == "" {
return "", "", errMissingAddr
}
if ip := net.ParseIP(target); ip != nil {
// target is an IPv4 or IPv6(without brackets) address
return target, defaultPort, nil
}
if host, port, err := net.SplitHostPort(target); err == nil {
// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
if host == "" {
// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
host = "localhost"
}
if port == "" {
// If the port field is empty (target ends with a colon), e.g. "[::1]:", defaultPort is used.
port = defaultPort
}
return host, port, nil
}
if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil {
// target doesn't have port
return host, port, nil
}
return "", "", fmt.Errorf("invalid target address %v", target)
}
// Resolve creates a watcher that watches the name resolution of the target.
func (r *dnsResolver) Resolve(target string) (Watcher, error) {
host, port, err := parseTarget(target)
if err != nil {
return nil, err
}
if net.ParseIP(host) != nil {
ipWatcher := &ipWatcher{
updateChan: make(chan *Update, 1),
}
host, _ = formatIP(host)
ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port}
return ipWatcher, nil
}
ctx, cancel := context.WithCancel(context.Background())
return &dnsWatcher{
r: r,
host: host,
port: port,
ctx: ctx,
cancel: cancel,
t: time.NewTimer(0),
}, nil
}
// dnsWatcher watches for the name resolution update for a specific target
type dnsWatcher struct {
r *dnsResolver
host string
port string
// The latest resolved address list
curAddrs []*Update
ctx context.Context
cancel context.CancelFunc
t *time.Timer
}
// ipWatcher watches for the name resolution update for an IP address.
type ipWatcher struct {
updateChan chan *Update
}
// Next returns the address resolution Update for the target. For an IP address,
// the resolution is the address itself, so polling the name server is unnecessary.
// Therefore, Next() returns an Update the first time it is called, and blocks
// on all following calls, as no further Update exists, until the watcher is closed.
func (i *ipWatcher) Next() ([]*Update, error) {
u, ok := <-i.updateChan
if !ok {
return nil, errWatcherClose
}
return []*Update{u}, nil
}
// Close closes the ipWatcher.
func (i *ipWatcher) Close() {
close(i.updateChan)
}
// AddressType indicates the address type returned by name resolution.
type AddressType uint8
const (
// Backend indicates the server is a backend server.
Backend AddressType = iota
// GRPCLB indicates the server is a grpclb load balancer.
GRPCLB
)
// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The
// name resolver used by the grpclb balancer is required to provide this type of metadata in
// its address updates.
type AddrMetadataGRPCLB struct {
// AddrType is the type of server (grpc load balancer or backend).
AddrType AddressType
// ServerName is the name of the grpc load balancer. Used for authentication.
ServerName string
}
// compileUpdate compares the previously resolved addresses with the newly resolved
// addresses and generates an update list.
func (w *dnsWatcher) compileUpdate(newAddrs []*Update) []*Update {
update := make(map[Update]bool)
for _, u := range newAddrs {
update[*u] = true
}
for _, u := range w.curAddrs {
if _, ok := update[*u]; ok {
delete(update, *u)
continue
}
update[Update{Addr: u.Addr, Op: Delete, Metadata: u.Metadata}] = true
}
res := make([]*Update, 0, len(update))
for k := range update {
tmp := k
res = append(res, &tmp)
}
return res
}
func (w *dnsWatcher) lookupSRV() []*Update {
var newAddrs []*Update
_, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host)
if err != nil {
grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
return nil
}
for _, s := range srvs {
lbAddrs, err := lookupHost(w.ctx, s.Target)
if err != nil {
grpclog.Warningf("grpc: failed load banlacer address dns lookup due to %v.\n", err)
continue
}
for _, a := range lbAddrs {
a, ok := formatIP(a)
if !ok {
grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
continue
}
newAddrs = append(newAddrs, &Update{Addr: a + ":" + strconv.Itoa(int(s.Port)),
Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}})
}
}
return newAddrs
}
func (w *dnsWatcher) lookupHost() []*Update {
var newAddrs []*Update
addrs, err := lookupHost(w.ctx, w.host)
if err != nil {
grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
return nil
}
for _, a := range addrs {
a, ok := formatIP(a)
if !ok {
grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
continue
}
newAddrs = append(newAddrs, &Update{Addr: a + ":" + w.port})
}
return newAddrs
}
func (w *dnsWatcher) lookup() []*Update {
newAddrs := w.lookupSRV()
if newAddrs == nil {
// If we failed to get any balancer addresses (either there is no SRV record for the
// target, or resolution/parsing of the balancer target failed),
// fall back to whatever A record info is available.
newAddrs = w.lookupHost()
}
result := w.compileUpdate(newAddrs)
w.curAddrs = newAddrs
return result
}
// Next returns the resolved address update (delta) for the target. If there's no
// change, it waits for the resolver's polling interval (30 minutes by default) and resolves again.
func (w *dnsWatcher) Next() ([]*Update, error) {
for {
select {
case <-w.ctx.Done():
return nil, errWatcherClose
case <-w.t.C:
}
result := w.lookup()
// Next lookup should happen after an interval defined by w.r.freq.
w.t.Reset(w.r.freq)
if len(result) > 0 {
return result, nil
}
}
}
func (w *dnsWatcher) Close() {
w.cancel()
}
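For orientation, a minimal usage sketch of the naming API in the file above (not part of the vendored code; the target address is hypothetical): a Resolver is constructed, Resolve returns a Watcher, and Next is polled for address deltas.

package main

import (
	"log"

	"google.golang.org/grpc/naming"
)

func main() {
	r, err := naming.NewDNSResolver() // polls at defaultFreq (30 minutes)
	if err != nil {
		log.Fatal(err)
	}
	w, err := r.Resolve("example.com:50051") // hypothetical target
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	for {
		updates, err := w.Next() // blocks until the resolved address set changes
		if err != nil {
			log.Fatal(err)
		}
		for _, u := range updates {
			log.Printf("op=%v addr=%s", u.Op, u.Addr)
		}
	}
}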

34
vendor/google.golang.org/grpc/naming/go17.go generated vendored Normal file
View File

@ -0,0 +1,34 @@
// +build go1.6,!go1.8
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package naming
import (
"net"
"golang.org/x/net/context"
)
var (
lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) }
lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
return net.LookupSRV(service, proto, name)
}
)

28
vendor/google.golang.org/grpc/naming/go18.go generated vendored Normal file
View File

@ -0,0 +1,28 @@
// +build go1.8
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package naming
import "net"
var (
lookupHost = net.DefaultResolver.LookupHost
lookupSRV = net.DefaultResolver.LookupSRV
)

View File

@ -1,33 +1,18 @@
 /*
  *
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
  *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  *
  */

View File

@ -1,33 +1,18 @@
 /*
  *
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
  *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  *
  */
@ -42,7 +27,8 @@ import (
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"
) )
// Peer contains the information of the peer for an RPC. // Peer contains the information of the peer for an RPC, such as the address
// and authentication information.
type Peer struct { type Peer struct {
// Addr is the peer address. // Addr is the peer address.
Addr net.Addr Addr net.Addr

130
vendor/google.golang.org/grpc/proxy.go generated vendored Normal file
View File

@ -0,0 +1,130 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"bufio"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/http/httputil"
"net/url"
"golang.org/x/net/context"
)
var (
// errDisabled indicates that proxy is disabled for the address.
errDisabled = errors.New("proxy is disabled for the address")
// The following variable will be overwritten in the tests.
httpProxyFromEnvironment = http.ProxyFromEnvironment
)
func mapAddress(ctx context.Context, address string) (string, error) {
req := &http.Request{
URL: &url.URL{
Scheme: "https",
Host: address,
},
}
url, err := httpProxyFromEnvironment(req)
if err != nil {
return "", err
}
if url == nil {
return "", errDisabled
}
return url.Host, nil
}
// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
// It's possible that this reader reads more than what's needed for the response and stores
// those bytes in the buffer.
// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the
// bytes in the buffer.
type bufConn struct {
net.Conn
r io.Reader
}
func (c *bufConn) Read(b []byte) (int, error) {
return c.r.Read(b)
}
func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ net.Conn, err error) {
defer func() {
if err != nil {
conn.Close()
}
}()
req := (&http.Request{
Method: http.MethodConnect,
URL: &url.URL{Host: addr},
Header: map[string][]string{"User-Agent": {grpcUA}},
})
if err := sendHTTPRequest(ctx, req, conn); err != nil {
return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
}
r := bufio.NewReader(conn)
resp, err := http.ReadResponse(r, req)
if err != nil {
return nil, fmt.Errorf("reading server HTTP response: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
dump, err := httputil.DumpResponse(resp, true)
if err != nil {
return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status)
}
return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
}
return &bufConn{Conn: conn, r: r}, nil
}
// newProxyDialer returns a dialer that connects to the proxy first if necessary.
// The returned dialer checks whether a proxy is needed, dials the proxy with the
// provided dialer, performs the HTTP CONNECT handshake, and returns the connection.
func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) {
return func(ctx context.Context, addr string) (conn net.Conn, err error) {
var skipHandshake bool
newAddr, err := mapAddress(ctx, addr)
if err != nil {
if err != errDisabled {
return nil, err
}
skipHandshake = true
newAddr = addr
}
conn, err = dialer(ctx, newAddr)
if err != nil {
return
}
if !skipHandshake {
conn, err = doHTTPConnectHandshake(ctx, conn, addr)
}
return
}
}
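As a rough illustration of mapAddress above (a sketch under assumptions, not part of the vendored file): http.ProxyFromEnvironment consults the HTTPS_PROXY/HTTP_PROXY/NO_PROXY environment variables, so the dialer returned by newProxyDialer only performs the CONNECT handshake when those variables name a proxy for the target.

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// proxyFor mirrors the question mapAddress asks: given a target address,
// does the environment configure a proxy for it?
func proxyFor(address string) (*url.URL, error) {
	req := &http.Request{URL: &url.URL{Scheme: "https", Host: address}}
	return http.ProxyFromEnvironment(req)
}

func main() {
	// With HTTPS_PROXY=http://proxy.local:3128 set this prints the proxy URL;
	// with no proxy configured (or the host listed in NO_PROXY) it prints <nil>.
	u, err := proxyFor("example.com:443") // hypothetical target
	fmt.Println(u, err)
}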

View File

@ -1,33 +1,18 @@
 /*
  *
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
  *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  *
  */
@ -37,47 +22,22 @@ import (
"bytes" "bytes"
"compress/gzip" "compress/gzip"
"encoding/binary" "encoding/binary"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"math" "math"
"os" "sync"
"time" "time"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats" "google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport" "google.golang.org/grpc/transport"
) )
// Codec defines the interface gRPC uses to encode and decode messages.
type Codec interface {
// Marshal returns the wire format of v.
Marshal(v interface{}) ([]byte, error)
// Unmarshal parses the wire format into v.
Unmarshal(data []byte, v interface{}) error
// String returns the name of the Codec implementation. The returned
// string will be used as part of content type in transmission.
String() string
}
// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
type protoCodec struct{}
func (protoCodec) Marshal(v interface{}) ([]byte, error) {
return proto.Marshal(v.(proto.Message))
}
func (protoCodec) Unmarshal(data []byte, v interface{}) error {
return proto.Unmarshal(data, v.(proto.Message))
}
func (protoCodec) String() string {
return "proto"
}
// Compressor defines the interface gRPC uses to compress a message. // Compressor defines the interface gRPC uses to compress a message.
type Compressor interface { type Compressor interface {
// Do compresses p into w. // Do compresses p into w.
@ -86,16 +46,24 @@ type Compressor interface {
Type() string Type() string
} }
// NewGZIPCompressor creates a Compressor based on GZIP. type gzipCompressor struct {
func NewGZIPCompressor() Compressor { pool sync.Pool
return &gzipCompressor{}
} }
type gzipCompressor struct { // NewGZIPCompressor creates a Compressor based on GZIP.
func NewGZIPCompressor() Compressor {
return &gzipCompressor{
pool: sync.Pool{
New: func() interface{} {
return gzip.NewWriter(ioutil.Discard)
},
},
}
} }
func (c *gzipCompressor) Do(w io.Writer, p []byte) error { func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
z := gzip.NewWriter(w) z := c.pool.Get().(*gzip.Writer)
z.Reset(w)
if _, err := z.Write(p); err != nil { if _, err := z.Write(p); err != nil {
return err return err
} }
@ -115,6 +83,7 @@ type Decompressor interface {
} }
type gzipDecompressor struct { type gzipDecompressor struct {
pool sync.Pool
} }
// NewGZIPDecompressor creates a Decompressor based on GZIP. // NewGZIPDecompressor creates a Decompressor based on GZIP.
@ -123,11 +92,26 @@ func NewGZIPDecompressor() Decompressor {
} }
func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
z, err := gzip.NewReader(r) var z *gzip.Reader
switch maybeZ := d.pool.Get().(type) {
case nil:
newZ, err := gzip.NewReader(r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer z.Close() z = newZ
case *gzip.Reader:
z = maybeZ
if err := z.Reset(r); err != nil {
d.pool.Put(z)
return nil, err
}
}
defer func() {
z.Close()
d.pool.Put(z)
}()
return ioutil.ReadAll(z) return ioutil.ReadAll(z)
} }
@ -140,7 +124,11 @@ type callInfo struct {
failFast bool failFast bool
headerMD metadata.MD headerMD metadata.MD
trailerMD metadata.MD trailerMD metadata.MD
peer *peer.Peer
traceInfo traceInfo // in trace.go traceInfo traceInfo // in trace.go
maxReceiveMessageSize *int
maxSendMessageSize *int
creds credentials.PerRPCCredentials
} }
var defaultCallInfo = callInfo{failFast: true} var defaultCallInfo = callInfo{failFast: true}
@ -157,6 +145,14 @@ type CallOption interface {
after(*callInfo) after(*callInfo)
} }
// EmptyCallOption does not alter the Call configuration.
// It can be embedded in another structure to carry satellite data for use
// by interceptors.
type EmptyCallOption struct{}
func (EmptyCallOption) before(*callInfo) error { return nil }
func (EmptyCallOption) after(*callInfo) {}
type beforeCall func(c *callInfo) error type beforeCall func(c *callInfo) error
func (o beforeCall) before(c *callInfo) error { return o(c) } func (o beforeCall) before(c *callInfo) error { return o(c) }
@ -183,12 +179,23 @@ func Trailer(md *metadata.MD) CallOption {
}) })
} }
// Peer returns a CallOption that retrieves peer information for a
// unary RPC.
func Peer(peer *peer.Peer) CallOption {
return afterCall(func(c *callInfo) {
if c.peer != nil {
*peer = *c.peer
}
})
}
// FailFast configures the action to take when an RPC is attempted on broken // FailFast configures the action to take when an RPC is attempted on broken
// connections or unreachable servers. If failfast is true, the RPC will fail // connections or unreachable servers. If failfast is true, the RPC will fail
// immediately. Otherwise, the RPC client will block the call until a // immediately. Otherwise, the RPC client will block the call until a
// connection is available (or the call is canceled or times out) and will retry // connection is available (or the call is canceled or times out) and will retry
// the call if it fails due to a transient error. Please refer to // the call if it fails due to a transient error. Please refer to
// https://github.com/grpc/grpc/blob/master/doc/fail_fast.md // https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
// Note: failFast defaults to true.
func FailFast(failFast bool) CallOption { func FailFast(failFast bool) CallOption {
return beforeCall(func(c *callInfo) error { return beforeCall(func(c *callInfo) error {
c.failFast = failFast c.failFast = failFast
@ -196,6 +203,31 @@ func FailFast(failFast bool) CallOption {
}) })
} }
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
func MaxCallRecvMsgSize(s int) CallOption {
return beforeCall(func(o *callInfo) error {
o.maxReceiveMessageSize = &s
return nil
})
}
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
func MaxCallSendMsgSize(s int) CallOption {
return beforeCall(func(o *callInfo) error {
o.maxSendMessageSize = &s
return nil
})
}
// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
// for a call.
func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
return beforeCall(func(c *callInfo) error {
c.creds = creds
return nil
})
}
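A hedged sketch of how the new client CallOptions above are passed on a call; the method name and message types are hypothetical, and grpc.Invoke is used directly in place of a generated stub.

package example

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/peer"
)

// callWithOptions is illustrative only.
func callWithOptions(ctx context.Context, conn *grpc.ClientConn, req, resp interface{}) error {
	var p peer.Peer
	return grpc.Invoke(ctx, "/pkg.Service/Method", req, resp, conn,
		grpc.MaxCallRecvMsgSize(16*1024*1024), // raise the per-call receive limit
		grpc.MaxCallSendMsgSize(4*1024*1024),  // cap the request size for this call
		grpc.FailFast(false),                  // wait for a ready connection rather than failing fast
		grpc.Peer(&p),                         // p is populated once the RPC completes
	)
}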
// The format of the payload: compressed or not? // The format of the payload: compressed or not?
type payloadFormat uint8 type payloadFormat uint8
@ -212,7 +244,7 @@ type parser struct {
r io.Reader r io.Reader
// The header of a gRPC message. Find more detail // The header of a gRPC message. Find more detail
// at http://www.grpc.io/docs/guides/wire.html. // at https://grpc.io/docs/guides/wire.html.
header [5]byte header [5]byte
} }
@ -229,8 +261,8 @@ type parser struct {
// No other error values or types must be returned, which also means // No other error values or types must be returned, which also means
// that the underlying io.Reader must not return an incompatible // that the underlying io.Reader must not return an incompatible
// error. // error.
func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err error) { func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) {
if _, err := io.ReadFull(p.r, p.header[:]); err != nil { if _, err := p.r.Read(p.header[:]); err != nil {
return 0, nil, err return 0, nil, err
} }
@ -240,13 +272,13 @@ func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err erro
if length == 0 { if length == 0 {
return pf, nil, nil return pf, nil, nil
} }
if length > uint32(maxMsgSize) { if length > uint32(maxReceiveMessageSize) {
return 0, nil, Errorf(codes.Internal, "grpc: received message length %d exceeding the max size %d", length, maxMsgSize) return 0, nil, Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
} }
// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
// of making it for each message: // of making it for each message:
msg = make([]byte, int(length)) msg = make([]byte, int(length))
if _, err := io.ReadFull(p.r, msg); err != nil { if _, err := p.r.Read(msg); err != nil {
if err == io.EOF { if err == io.EOF {
err = io.ErrUnexpectedEOF err = io.ErrUnexpectedEOF
} }
@ -267,7 +299,7 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayl
// TODO(zhaoq): optimize to reduce memory alloc and copying. // TODO(zhaoq): optimize to reduce memory alloc and copying.
b, err = c.Marshal(msg) b, err = c.Marshal(msg)
if err != nil { if err != nil {
return nil, err return nil, Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
} }
if outPayload != nil { if outPayload != nil {
outPayload.Payload = msg outPayload.Payload = msg
@ -277,14 +309,14 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayl
} }
if cp != nil { if cp != nil {
if err := cp.Do(cbuf, b); err != nil { if err := cp.Do(cbuf, b); err != nil {
return nil, err return nil, Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
} }
b = cbuf.Bytes() b = cbuf.Bytes()
} }
length = uint(len(b)) length = uint(len(b))
} }
if length > math.MaxUint32 { if length > math.MaxUint32 {
return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length) return nil, Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", length)
} }
const ( const (
@ -325,8 +357,8 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) er
return nil return nil
} }
func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxMsgSize int, inPayload *stats.InPayload) error { func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload) error {
pf, d, err := p.recvMsg(maxMsgSize) pf, d, err := p.recvMsg(maxReceiveMessageSize)
if err != nil { if err != nil {
return err return err
} }
@ -342,10 +374,10 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
} }
} }
if len(d) > maxMsgSize { if len(d) > maxReceiveMessageSize {
// TODO: Revisit the error code. Currently keep it consistent with java // TODO: Revisit the error code. Currently keep it consistent with java
// implementation. // implementation.
return Errorf(codes.Internal, "grpc: received a message of %d bytes exceeding %d limit", len(d), maxMsgSize) return Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
} }
if err := c.Unmarshal(d, m); err != nil { if err := c.Unmarshal(d, m); err != nil {
return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
@ -360,116 +392,57 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
return nil return nil
} }
// rpcError defines the status from an RPC. type rpcInfo struct {
type rpcError struct { bytesSent bool
code codes.Code bytesReceived bool
desc string
} }
func (e *rpcError) Error() string { type rpcInfoContextKey struct{}
return fmt.Sprintf("rpc error: code = %d desc = %s", e.code, e.desc)
func newContextWithRPCInfo(ctx context.Context) context.Context {
return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{})
}
func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo)
return
}
func updateRPCInfoInContext(ctx context.Context, s rpcInfo) {
if ss, ok := rpcInfoFromContext(ctx); ok {
*ss = s
}
return
} }
// Code returns the error code for err if it was produced by the rpc system. // Code returns the error code for err if it was produced by the rpc system.
// Otherwise, it returns codes.Unknown. // Otherwise, it returns codes.Unknown.
//
// Deprecated; use status.FromError and Code method instead.
func Code(err error) codes.Code { func Code(err error) codes.Code {
if err == nil { if s, ok := status.FromError(err); ok {
return codes.OK return s.Code()
}
if e, ok := err.(*rpcError); ok {
return e.code
} }
return codes.Unknown return codes.Unknown
} }
// ErrorDesc returns the error description of err if it was produced by the rpc system. // ErrorDesc returns the error description of err if it was produced by the rpc system.
// Otherwise, it returns err.Error() or empty string when err is nil. // Otherwise, it returns err.Error() or empty string when err is nil.
//
// Deprecated; use status.FromError and Message method instead.
func ErrorDesc(err error) string { func ErrorDesc(err error) string {
if err == nil { if s, ok := status.FromError(err); ok {
return "" return s.Message()
}
if e, ok := err.(*rpcError); ok {
return e.desc
} }
return err.Error() return err.Error()
} }
// Errorf returns an error containing an error code and a description; // Errorf returns an error containing an error code and a description;
// Errorf returns nil if c is OK. // Errorf returns nil if c is OK.
//
// Deprecated; use status.Errorf instead.
func Errorf(c codes.Code, format string, a ...interface{}) error { func Errorf(c codes.Code, format string, a ...interface{}) error {
if c == codes.OK { return status.Errorf(c, format, a...)
return nil
}
return &rpcError{
code: c,
desc: fmt.Sprintf(format, a...),
}
}
// toRPCErr converts an error into a rpcError.
func toRPCErr(err error) error {
switch e := err.(type) {
case *rpcError:
return err
case transport.StreamError:
return &rpcError{
code: e.Code,
desc: e.Desc,
}
case transport.ConnectionError:
return &rpcError{
code: codes.Internal,
desc: e.Desc,
}
default:
switch err {
case context.DeadlineExceeded:
return &rpcError{
code: codes.DeadlineExceeded,
desc: err.Error(),
}
case context.Canceled:
return &rpcError{
code: codes.Canceled,
desc: err.Error(),
}
case ErrClientConnClosing:
return &rpcError{
code: codes.FailedPrecondition,
desc: err.Error(),
}
}
}
return Errorf(codes.Unknown, "%v", err)
}
// convertCode converts a standard Go error into its canonical code. Note that
// this is only used to translate the error returned by the server applications.
func convertCode(err error) codes.Code {
switch err {
case nil:
return codes.OK
case io.EOF:
return codes.OutOfRange
case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
return codes.FailedPrecondition
case os.ErrInvalid:
return codes.InvalidArgument
case context.Canceled:
return codes.Canceled
case context.DeadlineExceeded:
return codes.DeadlineExceeded
}
switch {
case os.IsExist(err):
return codes.AlreadyExists
case os.IsNotExist(err):
return codes.NotFound
case os.IsPermission(err):
return codes.PermissionDenied
}
return codes.Unknown
} }
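With rpcError removed, error inspection goes through the status package; a small sketch of the replacement for the deprecated Code/ErrorDesc helpers (names here are illustrative):

package example

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// classify extracts the gRPC code and message from an error, mirroring the
// pattern the deprecated grpc.Code and grpc.ErrorDesc now delegate to.
func classify(err error) (codes.Code, string) {
	if s, ok := status.FromError(err); ok {
		return s.Code(), s.Message()
	}
	return codes.Unknown, err.Error()
}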
// MethodConfig defines the configuration recommended by the service providers for a // MethodConfig defines the configuration recommended by the service providers for a
@ -479,24 +452,22 @@ type MethodConfig struct {
// WaitForReady indicates whether RPCs sent to this method should wait until // WaitForReady indicates whether RPCs sent to this method should wait until
// the connection is ready by default (!failfast). The value specified via the // the connection is ready by default (!failfast). The value specified via the
// gRPC client API will override the value set here. // gRPC client API will override the value set here.
WaitForReady bool WaitForReady *bool
// Timeout is the default timeout for RPCs sent to this method. The actual // Timeout is the default timeout for RPCs sent to this method. The actual
// deadline used will be the minimum of the value specified here and the value // deadline used will be the minimum of the value specified here and the value
// set by the application via the gRPC client API. If either one is not set, // set by the application via the gRPC client API. If either one is not set,
// then the other will be used. If neither is set, then the RPC has no deadline. // then the other will be used. If neither is set, then the RPC has no deadline.
Timeout time.Duration Timeout *time.Duration
// MaxReqSize is the maximum allowed payload size for an individual request in a // MaxReqSize is the maximum allowed payload size for an individual request in a
// stream (client->server) in bytes. The size which is measured is the serialized, // stream (client->server) in bytes. The size which is measured is the serialized
// uncompressed payload in bytes. The actual value used is the minumum of the value // payload after per-message compression (but before stream compression) in bytes.
// specified here and the value set by the application via the gRPC client API. If // The actual value used is the minumum of the value specified here and the value set
// either one is not set, then the other will be used. If neither is set, then the // by the application via the gRPC client API. If either one is not set, then the other
// built-in default is used. // will be used. If neither is set, then the built-in default is used.
// TODO: support this. MaxReqSize *int
MaxReqSize uint64
// MaxRespSize is the maximum allowed payload size for an individual response in a // MaxRespSize is the maximum allowed payload size for an individual response in a
// stream (server->client) in bytes. // stream (server->client) in bytes.
// TODO: support this. MaxRespSize *int
MaxRespSize uint64
} }
// ServiceConfig is provided by the service provider and contains parameters for how // ServiceConfig is provided by the service provider and contains parameters for how
@ -507,9 +478,38 @@ type ServiceConfig struct {
// via grpc.WithBalancer will override this. // via grpc.WithBalancer will override this.
LB Balancer LB Balancer
// Methods contains a map for the methods in this service. // Methods contains a map for the methods in this service.
// If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig.
// If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
// Otherwise, the method has no MethodConfig to use.
Methods map[string]MethodConfig Methods map[string]MethodConfig
} }
func min(a, b *int) *int {
if *a < *b {
return a
}
return b
}
func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
if mcMax == nil && doptMax == nil {
return &defaultVal
}
if mcMax != nil && doptMax != nil {
return min(mcMax, doptMax)
}
if mcMax != nil {
return mcMax
}
return doptMax
}
// SupportPackageIsVersion3 is referenced from generated protocol buffer files.
// The latest support package version is 4.
// SupportPackageIsVersion3 is kept for compatibility. It will be removed in the
// next support package version update.
const SupportPackageIsVersion3 = true
// SupportPackageIsVersion4 is referenced from generated protocol buffer files // SupportPackageIsVersion4 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the grpc package. // to assert that that code is compatible with this version of the grpc package.
// //
@ -517,3 +517,8 @@ type ServiceConfig struct {
// requires a synchronised update of grpc-go and protoc-gen-go. This constant // requires a synchronised update of grpc-go and protoc-gen-go. This constant
// should not be referenced from any other code. // should not be referenced from any other code.
const SupportPackageIsVersion4 = true const SupportPackageIsVersion4 = true
// Version is the current grpc version.
const Version = "1.6.0-dev"
const grpcUA = "grpc-go/" + Version

View File

@ -1,33 +1,18 @@
 /*
  *
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
  *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  *
  */
@ -53,12 +38,19 @@ import (
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog" "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal" "google.golang.org/grpc/internal"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
"google.golang.org/grpc/stats" "google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/tap" "google.golang.org/grpc/tap"
"google.golang.org/grpc/transport" "google.golang.org/grpc/transport"
) )
const (
defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4
defaultServerMaxSendMessageSize = 1024 * 1024 * 4
)
type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
// MethodDesc represents an RPC service's method specification. // MethodDesc represents an RPC service's method specification.
@ -94,6 +86,7 @@ type Server struct {
mu sync.Mutex // guards following mu sync.Mutex // guards following
lis map[net.Listener]bool lis map[net.Listener]bool
conns map[io.Closer]bool conns map[io.Closer]bool
serve bool
drain bool drain bool
ctx context.Context ctx context.Context
cancel context.CancelFunc cancel context.CancelFunc
@ -109,20 +102,59 @@ type options struct {
codec Codec codec Codec
cp Compressor cp Compressor
dc Decompressor dc Decompressor
maxMsgSize int
unaryInt UnaryServerInterceptor unaryInt UnaryServerInterceptor
streamInt StreamServerInterceptor streamInt StreamServerInterceptor
inTapHandle tap.ServerInHandle inTapHandle tap.ServerInHandle
statsHandler stats.Handler statsHandler stats.Handler
maxConcurrentStreams uint32 maxConcurrentStreams uint32
maxReceiveMessageSize int
maxSendMessageSize int
useHandlerImpl bool // use http.Handler-based server useHandlerImpl bool // use http.Handler-based server
unknownStreamDesc *StreamDesc
keepaliveParams keepalive.ServerParameters
keepalivePolicy keepalive.EnforcementPolicy
initialWindowSize int32
initialConnWindowSize int32
} }
var defaultMaxMsgSize = 1024 * 1024 * 4 // use 4MB as the default message size limit var defaultServerOptions = options{
maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
maxSendMessageSize: defaultServerMaxSendMessageSize,
}
// A ServerOption sets options. // A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
type ServerOption func(*options) type ServerOption func(*options)
// InitialWindowSize returns a ServerOption that sets window size for stream.
// The lower bound for window size is 64K and any value smaller than that will be ignored.
func InitialWindowSize(s int32) ServerOption {
return func(o *options) {
o.initialWindowSize = s
}
}
// InitialConnWindowSize returns a ServerOption that sets window size for a connection.
// The lower bound for window size is 64K and any value smaller than that will be ignored.
func InitialConnWindowSize(s int32) ServerOption {
return func(o *options) {
o.initialConnWindowSize = s
}
}
// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
return func(o *options) {
o.keepaliveParams = kp
}
}
// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server.
func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
return func(o *options) {
o.keepalivePolicy = kep
}
}
// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. // CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
func CustomCodec(codec Codec) ServerOption { func CustomCodec(codec Codec) ServerOption {
return func(o *options) { return func(o *options) {
@ -144,11 +176,25 @@ func RPCDecompressor(dc Decompressor) ServerOption {
} }
} }
// MaxMsgSize returns a ServerOption to set the max message size in bytes for inbound mesages. // MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
// If this is not set, gRPC uses the default 4MB. // If this is not set, gRPC uses the default limit. Deprecated: use MaxRecvMsgSize instead.
func MaxMsgSize(m int) ServerOption { func MaxMsgSize(m int) ServerOption {
return MaxRecvMsgSize(m)
}
// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
// If this is not set, gRPC uses the default 4MB.
func MaxRecvMsgSize(m int) ServerOption {
return func(o *options) { return func(o *options) {
o.maxMsgSize = m o.maxReceiveMessageSize = m
}
}
// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send.
// If this is not set, gRPC uses the default 4MB.
func MaxSendMsgSize(m int) ServerOption {
return func(o *options) {
o.maxSendMessageSize = m
} }
} }
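A minimal sketch of wiring the new server-side size and keepalive options together (the listener address and keepalive values are illustrative, not recommendations):

package example

import (
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func newConfiguredServer() (*grpc.Server, net.Listener, error) {
	lis, err := net.Listen("tcp", ":50051") // illustrative address
	if err != nil {
		return nil, nil, err
	}
	s := grpc.NewServer(
		grpc.MaxRecvMsgSize(8*1024*1024), // raise the 4MB default receive limit
		grpc.MaxSendMsgSize(8*1024*1024), // and the send limit
		grpc.KeepaliveParams(keepalive.ServerParameters{
			Time:    2 * time.Hour,    // ping an idle client after this long
			Timeout: 20 * time.Second, // wait this long for the ping ack
		}),
	)
	return s, lis, nil
}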
@ -173,7 +219,7 @@ func Creds(c credentials.TransportCredentials) ServerOption {
func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
return func(o *options) { return func(o *options) {
if o.unaryInt != nil { if o.unaryInt != nil {
panic("The unary server interceptor has been set.") panic("The unary server interceptor was already set and may not be reset.")
} }
o.unaryInt = i o.unaryInt = i
} }
@ -184,7 +230,7 @@ func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
func StreamInterceptor(i StreamServerInterceptor) ServerOption { func StreamInterceptor(i StreamServerInterceptor) ServerOption {
return func(o *options) { return func(o *options) {
if o.streamInt != nil { if o.streamInt != nil {
panic("The stream server interceptor has been set.") panic("The stream server interceptor was already set and may not be reset.")
} }
o.streamInt = i o.streamInt = i
} }
@ -195,7 +241,7 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption {
func InTapHandle(h tap.ServerInHandle) ServerOption { func InTapHandle(h tap.ServerInHandle) ServerOption {
return func(o *options) { return func(o *options) {
if o.inTapHandle != nil { if o.inTapHandle != nil {
panic("The tap handle has been set.") panic("The tap handle was already set and may not be reset.")
} }
o.inTapHandle = h o.inTapHandle = h
} }
@ -208,11 +254,28 @@ func StatsHandler(h stats.Handler) ServerOption {
} }
} }
// UnknownServiceHandler returns a ServerOption that allows for adding a custom
// unknown service handler. The provided method is a bidi-streaming RPC service
// handler that will be invoked instead of returning the "unimplemented" gRPC
// error whenever a request is received for an unregistered service or method.
// The handling function has full access to the Context of the request and the
// stream, and the invocation passes through interceptors.
func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
return func(o *options) {
o.unknownStreamDesc = &StreamDesc{
StreamName: "unknown_service_handler",
Handler: streamHandler,
// We need to assume that the users of the streamHandler will want to use both.
ClientStreams: true,
ServerStreams: true,
}
}
}
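To make UnknownServiceHandler concrete, a hedged sketch of a catch-all stream handler; a real handler would typically proxy the raw stream, whereas this one just rejects with an explicit status.

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// catchAll is invoked for every request whose service or method is not registered.
func catchAll(srv interface{}, stream grpc.ServerStream) error {
	return status.Errorf(codes.Unimplemented, "no handler registered for this service or method")
}

func newCatchAllServer() *grpc.Server {
	return grpc.NewServer(grpc.UnknownServiceHandler(catchAll))
}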
// NewServer creates a gRPC server which has no service registered and has not // NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet. // started to accept requests yet.
func NewServer(opt ...ServerOption) *Server { func NewServer(opt ...ServerOption) *Server {
var opts options opts := defaultServerOptions
opts.maxMsgSize = defaultMaxMsgSize
for _, o := range opt { for _, o := range opt {
o(&opts) o(&opts)
} }
@ -251,8 +314,8 @@ func (s *Server) errorf(format string, a ...interface{}) {
} }
} }
// RegisterService register a service and its implementation to the gRPC // RegisterService registers a service and its implementation to the gRPC
// server. Called from the IDL generated code. This must be called before // server. It is called from the IDL generated code. This must be called before
// invoking Serve. // invoking Serve.
func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
ht := reflect.TypeOf(sd.HandlerType).Elem() ht := reflect.TypeOf(sd.HandlerType).Elem()
@ -267,6 +330,9 @@ func (s *Server) register(sd *ServiceDesc, ss interface{}) {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
s.printf("RegisterService(%q)", sd.ServiceName) s.printf("RegisterService(%q)", sd.ServiceName)
if s.serve {
grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
}
if _, ok := s.m[sd.ServiceName]; ok { if _, ok := s.m[sd.ServiceName]; ok {
grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
} }
@ -297,7 +363,7 @@ type MethodInfo struct {
IsServerStream bool IsServerStream bool
} }
// ServiceInfo contains unary RPC method info, streaming RPC methid info and metadata for a service. // ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service.
type ServiceInfo struct { type ServiceInfo struct {
Methods []MethodInfo Methods []MethodInfo
// Metadata is the metadata specified in ServiceDesc when registering service. // Metadata is the metadata specified in ServiceDesc when registering service.
@ -355,6 +421,7 @@ func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credenti
func (s *Server) Serve(lis net.Listener) error { func (s *Server) Serve(lis net.Listener) error {
s.mu.Lock() s.mu.Lock()
s.printf("serving") s.printf("serving")
s.serve = true
if s.lis == nil { if s.lis == nil {
s.mu.Unlock() s.mu.Unlock()
lis.Close() lis.Close()
@ -390,10 +457,12 @@ func (s *Server) Serve(lis net.Listener) error {
s.mu.Lock() s.mu.Lock()
s.printf("Accept error: %v; retrying in %v", err, tempDelay) s.printf("Accept error: %v; retrying in %v", err, tempDelay)
s.mu.Unlock() s.mu.Unlock()
timer := time.NewTimer(tempDelay)
select { select {
case <-time.After(tempDelay): case <-timer.C:
case <-s.ctx.Done(): case <-s.ctx.Done():
} }
timer.Stop()
continue continue
} }
s.mu.Lock() s.mu.Lock()
@ -416,7 +485,7 @@ func (s *Server) handleRawConn(rawConn net.Conn) {
s.mu.Lock() s.mu.Lock()
s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
s.mu.Unlock() s.mu.Unlock()
grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
// If serverHandShake returns ErrConnDispatched, keep rawConn open. // If serverHandShake returns ErrConnDispatched, keep rawConn open.
if err != credentials.ErrConnDispatched { if err != credentials.ErrConnDispatched {
rawConn.Close() rawConn.Close()
@ -450,6 +519,10 @@ func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo)
AuthInfo: authInfo, AuthInfo: authInfo,
InTapHandle: s.opts.inTapHandle, InTapHandle: s.opts.inTapHandle,
StatsHandler: s.opts.statsHandler, StatsHandler: s.opts.statsHandler,
KeepaliveParams: s.opts.keepaliveParams,
KeepalivePolicy: s.opts.keepalivePolicy,
InitialWindowSize: s.opts.initialWindowSize,
InitialConnWindowSize: s.opts.initialConnWindowSize,
} }
st, err := transport.NewServerTransport("http2", c, config) st, err := transport.NewServerTransport("http2", c, config)
if err != nil { if err != nil {
@ -457,7 +530,7 @@ func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo)
s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
s.mu.Unlock() s.mu.Unlock()
c.Close() c.Close()
grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err) grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err)
return return
} }
if !s.addConn(st) { if !s.addConn(st) {
@ -515,6 +588,30 @@ func (s *Server) serveUsingHandler(conn net.Conn) {
}) })
} }
// ServeHTTP implements the Go standard library's http.Handler
// interface by responding to the gRPC request r, by looking up
// the requested gRPC method in the gRPC server s.
//
// The provided HTTP request must have arrived on an HTTP/2
// connection. When using the Go standard library's server,
// practically this means that the Request must also have arrived
// over TLS.
//
// To share one port (such as 443 for https) between gRPC and an
// existing http.Handler, use a root http.Handler such as:
//
// if r.ProtoMajor == 2 && strings.HasPrefix(
// r.Header.Get("Content-Type"), "application/grpc") {
// grpcServer.ServeHTTP(w, r)
// } else {
// yourMux.ServeHTTP(w, r)
// }
//
// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally
// separate from grpc-go's HTTP/2 server. Performance and features may vary
// between the two paths. ServeHTTP does not support some gRPC features
// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL
// and subject to change.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
st, err := transport.NewServerHandlerTransport(w, r) st, err := transport.NewServerHandlerTransport(w, r)
if err != nil { if err != nil {
@ -581,14 +678,11 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
} }
p, err := encode(s.opts.codec, msg, cp, cbuf, outPayload) p, err := encode(s.opts.codec, msg, cp, cbuf, outPayload)
if err != nil { if err != nil {
// This typically indicates a fatal issue (e.g., memory grpclog.Errorln("grpc: server failed to encode response: ", err)
// corruption or hardware faults) the application program return err
// cannot handle. }
// if len(p) > s.opts.maxSendMessageSize {
// TODO(zhaoq): There exist other options also such as only closing the return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(p), s.opts.maxSendMessageSize)
// faulty stream locally and remotely (Other streams can keep going). Find
// the optimal option.
grpclog.Fatalf("grpc: Server failed to encode response %v", err)
} }
err = t.Write(stream, p, opts) err = t.Write(stream, p, opts)
if err == nil && outPayload != nil { if err == nil && outPayload != nil {
@ -605,9 +699,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
BeginTime: time.Now(), BeginTime: time.Now(),
} }
sh.HandleRPC(stream.Context(), begin) sh.HandleRPC(stream.Context(), begin)
}
defer func() { defer func() {
if sh != nil {
end := &stats.End{ end := &stats.End{
EndTime: time.Now(), EndTime: time.Now(),
} }
@ -615,8 +707,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
end.Error = toRPCErr(err) end.Error = toRPCErr(err)
} }
sh.HandleRPC(stream.Context(), end) sh.HandleRPC(stream.Context(), end)
}
}() }()
}
if trInfo != nil { if trInfo != nil {
defer trInfo.tr.Finish() defer trInfo.tr.Finish()
trInfo.firstLine.client = false trInfo.firstLine.client = false
@ -633,8 +725,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
stream.SetSendCompress(s.opts.cp.Type()) stream.SetSendCompress(s.opts.cp.Type())
} }
p := &parser{r: stream} p := &parser{r: stream}
for { pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize)
pf, req, err := p.recvMsg(s.opts.maxMsgSize)
if err == io.EOF { if err == io.EOF {
// The entire stream is done (for unary RPC only). // The entire stream is done (for unary RPC only).
return err return err
@ -643,45 +734,44 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
} }
if err != nil { if err != nil {
switch err := err.(type) { if st, ok := status.FromError(err); ok {
case *rpcError: if e := t.WriteStatus(stream, st); e != nil {
if e := t.WriteStatus(stream, err.code, err.desc); e != nil { grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
} }
} else {
switch st := err.(type) {
case transport.ConnectionError: case transport.ConnectionError:
// Nothing to do here. // Nothing to do here.
case transport.StreamError: case transport.StreamError:
if e := t.WriteStatus(stream, err.Code, err.Desc); e != nil { if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
} }
default: default:
panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err)) panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st))
}
} }
return err return err
} }
if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil {
switch err := err.(type) { if st, ok := status.FromError(err); ok {
case *rpcError: if e := t.WriteStatus(stream, st); e != nil {
if e := t.WriteStatus(stream, err.code, err.desc); e != nil { grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
} }
return err return err
default:
if e := t.WriteStatus(stream, codes.Internal, err.Error()); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
} }
if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil {
grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
// TODO checkRecvPayload always return RPC error. Add a return here if necessary. // TODO checkRecvPayload always return RPC error. Add a return here if necessary.
} }
}
var inPayload *stats.InPayload var inPayload *stats.InPayload
if sh != nil { if sh != nil {
inPayload = &stats.InPayload{ inPayload = &stats.InPayload{
RecvTime: time.Now(), RecvTime: time.Now(),
} }
} }
statusCode := codes.OK
statusDesc := ""
df := func(v interface{}) error { df := func(v interface{}) error {
if inPayload != nil { if inPayload != nil {
inPayload.WireLength = len(req) inPayload.WireLength = len(req)
@ -690,20 +780,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
var err error var err error
req, err = s.opts.dc.Do(bytes.NewReader(req)) req, err = s.opts.dc.Do(bytes.NewReader(req))
if err != nil { if err != nil {
if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
}
return Errorf(codes.Internal, err.Error()) return Errorf(codes.Internal, err.Error())
} }
} }
if len(req) > s.opts.maxMsgSize { if len(req) > s.opts.maxReceiveMessageSize {
// TODO: Revisit the error code. Currently keep it consistent with // TODO: Revisit the error code. Currently keep it consistent with
// java implementation. // java implementation.
statusCode = codes.Internal return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize)
statusDesc = fmt.Sprintf("grpc: server received a message of %d bytes exceeding %d limit", len(req), s.opts.maxMsgSize)
} }
if err := s.opts.codec.Unmarshal(req, v); err != nil { if err := s.opts.codec.Unmarshal(req, v); err != nil {
return err return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
} }
if inPayload != nil { if inPayload != nil {
inPayload.Payload = v inPayload.Payload = v
@ -718,21 +804,20 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
} }
reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt)
if appErr != nil { if appErr != nil {
if err, ok := appErr.(*rpcError); ok { appStatus, ok := status.FromError(appErr)
statusCode = err.code if !ok {
statusDesc = err.desc // Convert appErr if it is not a grpc status error.
} else { appErr = status.Error(convertCode(appErr), appErr.Error())
statusCode = convertCode(appErr) appStatus, _ = status.FromError(appErr)
statusDesc = appErr.Error()
} }
if trInfo != nil && statusCode != codes.OK { if trInfo != nil {
trInfo.tr.LazyLog(stringer(statusDesc), true) trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
trInfo.tr.SetError() trInfo.tr.SetError()
} }
if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { if e := t.WriteStatus(stream, appStatus); e != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e)
} }
return Errorf(statusCode, statusDesc) return appErr
} }
if trInfo != nil { if trInfo != nil {
trInfo.tr.LazyLog(stringer("OK"), false) trInfo.tr.LazyLog(stringer("OK"), false)
@ -742,27 +827,35 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Delay: false, Delay: false,
} }
if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil {
switch err := err.(type) { if err == io.EOF {
// The entire stream is done (for unary RPC only).
return err
}
if s, ok := status.FromError(err); ok {
if e := t.WriteStatus(stream, s); e != nil {
grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e)
}
} else {
switch st := err.(type) {
case transport.ConnectionError: case transport.ConnectionError:
// Nothing to do here. // Nothing to do here.
case transport.StreamError: case transport.StreamError:
statusCode = err.Code if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil {
statusDesc = err.Desc grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
default: default:
statusCode = codes.Unknown panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
statusDesc = err.Error() }
} }
return err return err
} }
if trInfo != nil { if trInfo != nil {
trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
} }
errWrite := t.WriteStatus(stream, statusCode, statusDesc) // TODO: Should we be logging if writing status failed here, like above?
if statusCode != codes.OK { // Should the logging be in WriteStatus? Should we ignore the WriteStatus
return Errorf(statusCode, statusDesc) // error or allow the stats handler to see it?
} return t.WriteStatus(stream, status.New(codes.OK, ""))
return errWrite
}
} }
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
@ -772,9 +865,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
BeginTime: time.Now(), BeginTime: time.Now(),
} }
sh.HandleRPC(stream.Context(), begin) sh.HandleRPC(stream.Context(), begin)
}
defer func() { defer func() {
if sh != nil {
end := &stats.End{ end := &stats.End{
EndTime: time.Now(), EndTime: time.Now(),
} }
@ -782,8 +873,8 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
end.Error = toRPCErr(err) end.Error = toRPCErr(err)
} }
sh.HandleRPC(stream.Context(), end) sh.HandleRPC(stream.Context(), end)
}
}() }()
}
if s.opts.cp != nil { if s.opts.cp != nil {
stream.SetSendCompress(s.opts.cp.Type()) stream.SetSendCompress(s.opts.cp.Type())
} }
@ -794,7 +885,8 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
codec: s.opts.codec, codec: s.opts.codec,
cp: s.opts.cp, cp: s.opts.cp,
dc: s.opts.dc, dc: s.opts.dc,
maxMsgSize: s.opts.maxMsgSize, maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
maxSendMessageSize: s.opts.maxSendMessageSize,
trInfo: trInfo, trInfo: trInfo,
statsHandler: sh, statsHandler: sh,
} }
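The serverStream now carries separate receive and send limits taken from the server options. As a rough sketch of how a server would configure them, assuming the public options are grpc.MaxRecvMsgSize and grpc.MaxSendMsgSize (neither option appears in this hunk):

package server

import "google.golang.org/grpc"

// newServer builds a gRPC server with explicit limits corresponding to the
// maxReceiveMessageSize/maxSendMessageSize fields above. The option names are
// assumed, not taken from this diff.
func newServer() *grpc.Server {
	return grpc.NewServer(
		grpc.MaxRecvMsgSize(4*1024*1024),  // largest message the server will accept
		grpc.MaxSendMsgSize(16*1024*1024), // largest message the server will send
	)
}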
@ -815,43 +907,47 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
}() }()
} }
var appErr error var appErr error
var server interface{}
if srv != nil {
server = srv.server
}
if s.opts.streamInt == nil { if s.opts.streamInt == nil {
appErr = sd.Handler(srv.server, ss) appErr = sd.Handler(server, ss)
} else { } else {
info := &StreamServerInfo{ info := &StreamServerInfo{
FullMethod: stream.Method(), FullMethod: stream.Method(),
IsClientStream: sd.ClientStreams, IsClientStream: sd.ClientStreams,
IsServerStream: sd.ServerStreams, IsServerStream: sd.ServerStreams,
} }
appErr = s.opts.streamInt(srv.server, ss, info, sd.Handler) appErr = s.opts.streamInt(server, ss, info, sd.Handler)
} }
if appErr != nil { if appErr != nil {
if err, ok := appErr.(*rpcError); ok { appStatus, ok := status.FromError(appErr)
ss.statusCode = err.code if !ok {
ss.statusDesc = err.desc switch err := appErr.(type) {
} else if err, ok := appErr.(transport.StreamError); ok { case transport.StreamError:
ss.statusCode = err.Code appStatus = status.New(err.Code, err.Desc)
ss.statusDesc = err.Desc default:
} else { appStatus = status.New(convertCode(appErr), appErr.Error())
ss.statusCode = convertCode(appErr)
ss.statusDesc = appErr.Error()
} }
appErr = appStatus.Err()
} }
if trInfo != nil { if trInfo != nil {
ss.mu.Lock() ss.mu.Lock()
if ss.statusCode != codes.OK { ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
ss.trInfo.tr.LazyLog(stringer(ss.statusDesc), true)
ss.trInfo.tr.SetError() ss.trInfo.tr.SetError()
} else {
ss.trInfo.tr.LazyLog(stringer("OK"), false)
}
ss.mu.Unlock() ss.mu.Unlock()
} }
errWrite := t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc) t.WriteStatus(ss.s, appStatus)
if ss.statusCode != codes.OK { // TODO: Should we log an error from WriteStatus here and below?
return Errorf(ss.statusCode, ss.statusDesc) return appErr
} }
return errWrite if trInfo != nil {
ss.mu.Lock()
ss.trInfo.tr.LazyLog(stringer("OK"), false)
ss.mu.Unlock()
}
return t.WriteStatus(ss.s, status.New(codes.OK, ""))
} }
@ -867,12 +963,12 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
trInfo.tr.SetError() trInfo.tr.SetError()
} }
errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
if err := t.WriteStatus(stream, codes.InvalidArgument, errDesc); err != nil { if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil {
if trInfo != nil { if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError() trInfo.tr.SetError()
} }
grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
} }
if trInfo != nil { if trInfo != nil {
trInfo.tr.Finish() trInfo.tr.Finish()
@ -883,17 +979,21 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
method := sm[pos+1:] method := sm[pos+1:]
srv, ok := s.m[service] srv, ok := s.m[service]
if !ok { if !ok {
if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
return
}
if trInfo != nil { if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true)
trInfo.tr.SetError() trInfo.tr.SetError()
} }
errDesc := fmt.Sprintf("unknown service %v", service) errDesc := fmt.Sprintf("unknown service %v", service)
if err := t.WriteStatus(stream, codes.Unimplemented, errDesc); err != nil { if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
if trInfo != nil { if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError() trInfo.tr.SetError()
} }
grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
} }
if trInfo != nil { if trInfo != nil {
trInfo.tr.Finish() trInfo.tr.Finish()
@ -913,13 +1013,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true) trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true)
trInfo.tr.SetError() trInfo.tr.SetError()
} }
if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
return
}
errDesc := fmt.Sprintf("unknown method %v", method) errDesc := fmt.Sprintf("unknown method %v", method)
if err := t.WriteStatus(stream, codes.Unimplemented, errDesc); err != nil { if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
if trInfo != nil { if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError() trInfo.tr.SetError()
} }
grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
} }
if trInfo != nil { if trInfo != nil {
trInfo.tr.Finish() trInfo.tr.Finish()
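handleStream can now divert unknown services and methods to s.opts.unknownStreamDesc instead of immediately writing codes.Unimplemented. A minimal sketch of wiring such a fallback, assuming the public option behind unknownStreamDesc is grpc.UnknownServiceHandler (the option itself is not part of this diff):

package server

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// newProxyServer registers a catch-all stream handler; with it in place the
// fallback branches added above run instead of the server writing
// codes.Unimplemented itself.
func newProxyServer() *grpc.Server {
	fallback := func(srv interface{}, stream grpc.ServerStream) error {
		// A real proxy would forward the raw stream; this sketch just refuses it.
		return status.Error(codes.Unimplemented, "method is not handled by this server")
	}
	return grpc.NewServer(grpc.UnknownServiceHandler(fallback))
}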
@ -957,8 +1061,9 @@ func (s *Server) Stop() {
s.mu.Unlock() s.mu.Unlock()
} }
// GracefulStop stops the gRPC server gracefully. It stops the server to accept new // GracefulStop stops the gRPC server gracefully. It stops the server from
// connections and RPCs and blocks until all the pending RPCs are finished. // accepting new connections and RPCs and blocks until all the pending RPCs are
// finished.
func (s *Server) GracefulStop() { func (s *Server) GracefulStop() {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
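The reworded GracefulStop contract (stop accepting new connections and RPCs, then block until pending RPCs finish) is usually driven from a signal handler. A minimal sketch using only the documented GracefulStop behaviour:

package main

import (
	"log"
	"net"
	"os"
	"os/signal"
	"syscall"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	srv := grpc.NewServer()

	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-sig
		// Refuses new connections and RPCs, then waits for in-flight RPCs.
		srv.GracefulStop()
	}()

	if err := srv.Serve(lis); err != nil {
		log.Printf("serve returned: %v", err)
	}
}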


@@ -1,33 +1,18 @@
 /*
  *
- * Copyright 2016, Google Inc.
- * All rights reserved.
+ * Copyright 2016 gRPC authors.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
  *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  *
  */
@@ -45,19 +30,22 @@ type ConnTagInfo struct {
 	RemoteAddr net.Addr
 	// LocalAddr is the local address of the corresponding connection.
 	LocalAddr net.Addr
-	// TODO add QOS related fields.
 }

 // RPCTagInfo defines the relevant information needed by RPC context tagger.
 type RPCTagInfo struct {
 	// FullMethodName is the RPC method in the format of /package.service/method.
 	FullMethodName string
+	// FailFast indicates if this RPC is failfast.
+	// This field is only valid on client side, it's always false on server side.
+	FailFast bool
 }

 // Handler defines the interface for the related stats handling (e.g., RPCs, connections).
 type Handler interface {
 	// TagRPC can attach some information to the given context.
-	// The returned context is used in the rest lifetime of the RPC.
+	// The context used for the rest lifetime of the RPC will be derived from
+	// the returned context.
 	TagRPC(context.Context, *RPCTagInfo) context.Context
 	// HandleRPC processes the RPC stats.
 	HandleRPC(context.Context, RPCStats)
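For context, a minimal stats.Handler that exercises the TagRPC contract described above. TagConn, HandleConn and the ConnStats type are assumed from the rest of the package; only TagRPC and HandleRPC appear in this hunk:

package monitoring

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc/stats"
)

// methodKey carries the value stored by TagRPC for later HandleRPC calls.
type methodKey struct{}

// rpcLogger is a minimal stats.Handler that tags each RPC with its full
// method name and logs every stats event it receives.
type rpcLogger struct{}

func (rpcLogger) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	// Per the updated comment, the RPC's context is derived from the returned
	// context, so the method name stored here is visible in HandleRPC.
	return context.WithValue(ctx, methodKey{}, info.FullMethodName)
}

func (rpcLogger) HandleRPC(ctx context.Context, s stats.RPCStats) {
	method, _ := ctx.Value(methodKey{}).(string)
	log.Printf("%s: %T (client side: %v)", method, s, s.IsClient())
}

func (rpcLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }

func (rpcLogger) HandleConn(context.Context, stats.ConnStats) {}

The handler would typically be installed with a server option such as grpc.StatsHandler(rpcLogger{}), which is also not shown in this diff.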


@@ -1,33 +1,18 @@
 /*
  *
- * Copyright 2016, Google Inc.
- * All rights reserved.
+ * Copyright 2016 gRPC authors.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
  *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  *
  */
@ -49,7 +34,7 @@ type RPCStats interface {
} }
// Begin contains stats when an RPC begins. // Begin contains stats when an RPC begins.
// FailFast are only valid if Client is true. // FailFast is only valid if this Begin is from client side.
type Begin struct { type Begin struct {
// Client is true if this Begin is from client side. // Client is true if this Begin is from client side.
Client bool Client bool
@ -59,7 +44,7 @@ type Begin struct {
FailFast bool FailFast bool
} }
// IsClient indicates if this is from client side. // IsClient indicates if the stats information is from client side.
func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) IsClient() bool { return s.Client }
func (s *Begin) isRPCStats() {} func (s *Begin) isRPCStats() {}
@ -80,19 +65,19 @@ type InPayload struct {
RecvTime time.Time RecvTime time.Time
} }
// IsClient indicates if this is from client side. // IsClient indicates if the stats information is from client side.
func (s *InPayload) IsClient() bool { return s.Client } func (s *InPayload) IsClient() bool { return s.Client }
func (s *InPayload) isRPCStats() {} func (s *InPayload) isRPCStats() {}
// InHeader contains stats when a header is received. // InHeader contains stats when a header is received.
// FullMethod, addresses and Compression are only valid if Client is false.
type InHeader struct { type InHeader struct {
// Client is true if this InHeader is from client side. // Client is true if this InHeader is from client side.
Client bool Client bool
// WireLength is the wire length of header. // WireLength is the wire length of header.
WireLength int WireLength int
// The following fields are valid only if Client is false.
// FullMethod is the full RPC method string, i.e., /package.service/method. // FullMethod is the full RPC method string, i.e., /package.service/method.
FullMethod string FullMethod string
// RemoteAddr is the remote address of the corresponding connection. // RemoteAddr is the remote address of the corresponding connection.
@ -103,7 +88,7 @@ type InHeader struct {
Compression string Compression string
} }
// IsClient indicates if this is from client side. // IsClient indicates if the stats information is from client side.
func (s *InHeader) IsClient() bool { return s.Client } func (s *InHeader) IsClient() bool { return s.Client }
func (s *InHeader) isRPCStats() {} func (s *InHeader) isRPCStats() {}
@ -116,7 +101,7 @@ type InTrailer struct {
WireLength int WireLength int
} }
// IsClient indicates if this is from client side. // IsClient indicates if the stats information is from client side.
func (s *InTrailer) IsClient() bool { return s.Client } func (s *InTrailer) IsClient() bool { return s.Client }
func (s *InTrailer) isRPCStats() {} func (s *InTrailer) isRPCStats() {}
@ -137,19 +122,19 @@ type OutPayload struct {
SentTime time.Time SentTime time.Time
} }
// IsClient indicates if this is from client side. // IsClient indicates if this stats information is from client side.
func (s *OutPayload) IsClient() bool { return s.Client } func (s *OutPayload) IsClient() bool { return s.Client }
func (s *OutPayload) isRPCStats() {} func (s *OutPayload) isRPCStats() {}
// OutHeader contains stats when a header is sent. // OutHeader contains stats when a header is sent.
// FullMethod, addresses and Compression are only valid if Client is true.
type OutHeader struct { type OutHeader struct {
// Client is true if this OutHeader is from client side. // Client is true if this OutHeader is from client side.
Client bool Client bool
// WireLength is the wire length of header. // WireLength is the wire length of header.
WireLength int WireLength int
// The following fields are valid only if Client is true.
// FullMethod is the full RPC method string, i.e., /package.service/method. // FullMethod is the full RPC method string, i.e., /package.service/method.
FullMethod string FullMethod string
// RemoteAddr is the remote address of the corresponding connection. // RemoteAddr is the remote address of the corresponding connection.
@ -160,7 +145,7 @@ type OutHeader struct {
Compression string Compression string
} }
// IsClient indicates if this is from client side. // IsClient indicates if this stats information is from client side.
func (s *OutHeader) IsClient() bool { return s.Client } func (s *OutHeader) IsClient() bool { return s.Client }
func (s *OutHeader) isRPCStats() {} func (s *OutHeader) isRPCStats() {}
@ -173,7 +158,7 @@ type OutTrailer struct {
WireLength int WireLength int
} }
// IsClient indicates if this is from client side. // IsClient indicates if this stats information is from client side.
func (s *OutTrailer) IsClient() bool { return s.Client } func (s *OutTrailer) IsClient() bool { return s.Client }
func (s *OutTrailer) isRPCStats() {} func (s *OutTrailer) isRPCStats() {}
@ -184,7 +169,9 @@ type End struct {
Client bool Client bool
// EndTime is the time when the RPC ends. // EndTime is the time when the RPC ends.
EndTime time.Time EndTime time.Time
// Error is the error just happened. Its type is gRPC error. // Error is the error the RPC ended with. It is an error generated from
// status.Status and can be converted back to status.Status using
// status.FromError if non-nil.
Error error Error error
} }
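A small sketch of what the new End.Error guarantee buys a stats handler: the final status of the RPC can be recovered with status.FromError rather than by inspecting error strings.

package monitoring

import (
	"log"

	"google.golang.org/grpc/stats"
	"google.golang.org/grpc/status"
)

// logEnd shows a stats handler consuming the End event: per the updated
// comment, a non-nil Error converts back to a *status.Status.
func logEnd(s stats.RPCStats) {
	end, ok := s.(*stats.End)
	if !ok {
		return // not the final event of the RPC
	}
	if end.Error == nil {
		log.Print("rpc finished with codes.OK")
		return
	}
	if st, ok := status.FromError(end.Error); ok {
		log.Printf("rpc failed: code=%s desc=%q", st.Code(), st.Message())
	}
}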

168
vendor/google.golang.org/grpc/status/status.go generated vendored Normal file

@ -0,0 +1,168 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package status implements errors returned by gRPC. These errors are
// serialized and transmitted on the wire between server and client, and allow
// for additional data to be transmitted via the Details field in the status
// proto. gRPC service handlers should return an error created by this
// package, and gRPC clients should expect a corresponding error to be
// returned from the RPC call.
//
// This package upholds the invariants that a non-nil error may not
// contain an OK code, and an OK code must result in a nil error.
package status
import (
"errors"
"fmt"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
)
// statusError is an alias of a status proto. It implements error and Status,
// and a nil statusError should never be returned by this package.
type statusError spb.Status
func (se *statusError) Error() string {
p := (*spb.Status)(se)
return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
}
func (se *statusError) status() *Status {
return &Status{s: (*spb.Status)(se)}
}
// Status represents an RPC status code, message, and details. It is immutable
// and should be created with New, Newf, or FromProto.
type Status struct {
s *spb.Status
}
// Code returns the status code contained in s.
func (s *Status) Code() codes.Code {
if s == nil || s.s == nil {
return codes.OK
}
return codes.Code(s.s.Code)
}
// Message returns the message contained in s.
func (s *Status) Message() string {
if s == nil || s.s == nil {
return ""
}
return s.s.Message
}
// Proto returns s's status as an spb.Status proto message.
func (s *Status) Proto() *spb.Status {
if s == nil {
return nil
}
return proto.Clone(s.s).(*spb.Status)
}
// Err returns an immutable error representing s; returns nil if s.Code() is
// OK.
func (s *Status) Err() error {
if s.Code() == codes.OK {
return nil
}
return (*statusError)(s.s)
}
// New returns a Status representing c and msg.
func New(c codes.Code, msg string) *Status {
return &Status{s: &spb.Status{Code: int32(c), Message: msg}}
}
// Newf returns New(c, fmt.Sprintf(format, a...)).
func Newf(c codes.Code, format string, a ...interface{}) *Status {
return New(c, fmt.Sprintf(format, a...))
}
// Error returns an error representing c and msg. If c is OK, returns nil.
func Error(c codes.Code, msg string) error {
return New(c, msg).Err()
}
// Errorf returns Error(c, fmt.Sprintf(format, a...)).
func Errorf(c codes.Code, format string, a ...interface{}) error {
return Error(c, fmt.Sprintf(format, a...))
}
// ErrorProto returns an error representing s. If s.Code is OK, returns nil.
func ErrorProto(s *spb.Status) error {
return FromProto(s).Err()
}
// FromProto returns a Status representing s.
func FromProto(s *spb.Status) *Status {
return &Status{s: proto.Clone(s).(*spb.Status)}
}
// FromError returns a Status representing err if it was produced from this
// package, otherwise it returns nil, false.
func FromError(err error) (s *Status, ok bool) {
if err == nil {
return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
}
if s, ok := err.(*statusError); ok {
return s.status(), true
}
return nil, false
}
// WithDetails returns a new status with the provided details messages appended to the status.
// If any errors are encountered, it returns nil and the first error encountered.
func (s *Status) WithDetails(details ...proto.Message) (*Status, error) {
if s.Code() == codes.OK {
return nil, errors.New("no error details for status with code OK")
}
// s.Code() != OK implies that s.Proto() != nil.
p := s.Proto()
for _, detail := range details {
any, err := ptypes.MarshalAny(detail)
if err != nil {
return nil, err
}
p.Details = append(p.Details, any)
}
return &Status{s: p}, nil
}
// Details returns a slice of details messages attached to the status.
// If a detail cannot be decoded, the error is returned in place of the detail.
func (s *Status) Details() []interface{} {
if s == nil || s.s == nil {
return nil
}
details := make([]interface{}, 0, len(s.s.Details))
for _, any := range s.s.Details {
detail := &ptypes.DynamicAny{}
if err := ptypes.UnmarshalAny(any, detail); err != nil {
details = append(details, err)
continue
}
details = append(details, detail.Message)
}
return details
}
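As a rough usage sketch of the package as vendored here (the function names are made up): a server-side handler reports failures with status errors, and the caller unpacks whatever error crossed the wire with FromError.

package example

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// find sketches the server side of the contract described in the package
// comment: handlers signal failures by returning status errors.
func find(id string) (string, error) {
	if id == "" {
		return "", status.Errorf(codes.InvalidArgument, "id must not be empty")
	}
	return "record-" + id, nil
}

// explain sketches the client side: errors produced by this package unpack
// back into a code and message with FromError.
func explain(err error) string {
	st, ok := status.FromError(err)
	if !ok {
		return "not a status error: " + err.Error()
	}
	return st.Code().String() + ": " + st.Message()
}

Structured details would be attached with WithDetails on the server and read back with Details on the client, following the same pattern.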


@@ -1,33 +1,18 @@
 /*
  *
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
  *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  *
  */
@ -37,7 +22,6 @@ import (
"bytes" "bytes"
"errors" "errors"
"io" "io"
"math"
"sync" "sync"
"time" "time"
@ -45,7 +29,9 @@ import (
"golang.org/x/net/trace" "golang.org/x/net/trace"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats" "google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport" "google.golang.org/grpc/transport"
) )
@ -73,11 +59,17 @@ type Stream interface {
// side. On server side, it simply returns the error to the caller. // side. On server side, it simply returns the error to the caller.
// SendMsg is called by generated code. Also Users can call SendMsg // SendMsg is called by generated code. Also Users can call SendMsg
// directly when it is really needed in their use cases. // directly when it is really needed in their use cases.
// It's safe to have a goroutine calling SendMsg and another goroutine calling
// recvMsg on the same stream at the same time.
// But it is not safe to call SendMsg on the same stream in different goroutines.
SendMsg(m interface{}) error SendMsg(m interface{}) error
// RecvMsg blocks until it receives a message or the stream is // RecvMsg blocks until it receives a message or the stream is
// done. On client side, it returns io.EOF when the stream is done. On // done. On client side, it returns io.EOF when the stream is done. On
// any other error, it aborts the stream and returns an RPC status. On // any other error, it aborts the stream and returns an RPC status. On
// server side, it simply returns the error to the caller. // server side, it simply returns the error to the caller.
// It's safe to have a goroutine calling SendMsg and another goroutine calling
// recvMsg on the same stream at the same time.
// But it is not safe to call RecvMsg on the same stream in different goroutines.
RecvMsg(m interface{}) error RecvMsg(m interface{}) error
} }
@ -93,6 +85,11 @@ type ClientStream interface {
// CloseSend closes the send direction of the stream. It closes the stream // CloseSend closes the send direction of the stream. It closes the stream
// when non-nil error is met. // when non-nil error is met.
CloseSend() error CloseSend() error
// Stream.SendMsg() may return a non-nil error when something wrong happens sending
// the request. The returned error indicates the status of this sending, not the final
// status of the RPC.
// Always call Stream.RecvMsg() to get the final status if you care about the status of
// the RPC.
Stream Stream
} }
@ -113,25 +110,39 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
cancel context.CancelFunc cancel context.CancelFunc
) )
c := defaultCallInfo c := defaultCallInfo
if mc, ok := cc.getMethodConfig(method); ok { mc := cc.GetMethodConfig(method)
c.failFast = !mc.WaitForReady if mc.WaitForReady != nil {
if mc.Timeout > 0 { c.failFast = !*mc.WaitForReady
ctx, cancel = context.WithTimeout(ctx, mc.Timeout)
} }
if mc.Timeout != nil {
ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
} }
opts = append(cc.dopts.callOptions, opts...)
for _, o := range opts { for _, o := range opts {
if err := o.before(&c); err != nil { if err := o.before(&c); err != nil {
return nil, toRPCErr(err) return nil, toRPCErr(err)
} }
} }
c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
callHdr := &transport.CallHdr{ callHdr := &transport.CallHdr{
Host: cc.authority, Host: cc.authority,
Method: method, Method: method,
Flush: desc.ServerStreams && desc.ClientStreams, // If it's not client streaming, we should already have the request to be sent,
// so we don't flush the header.
// If it's client streaming, the user may never send a request or send it any
// time soon, so we ask the transport to flush the header.
Flush: desc.ClientStreams,
} }
if cc.dopts.cp != nil { if cc.dopts.cp != nil {
callHdr.SendCompress = cc.dopts.cp.Type() callHdr.SendCompress = cc.dopts.cp.Type()
} }
if c.creds != nil {
callHdr.Creds = c.creds
}
var trInfo traceInfo var trInfo traceInfo
if EnableTracing { if EnableTracing {
trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
@ -151,18 +162,18 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
} }
}() }()
} }
ctx = newContextWithRPCInfo(ctx)
sh := cc.dopts.copts.StatsHandler sh := cc.dopts.copts.StatsHandler
if sh != nil { if sh != nil {
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method}) ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
begin := &stats.Begin{ begin := &stats.Begin{
Client: true, Client: true,
BeginTime: time.Now(), BeginTime: time.Now(),
FailFast: c.failFast, FailFast: c.failFast,
} }
sh.HandleRPC(ctx, begin) sh.HandleRPC(ctx, begin)
}
defer func() { defer func() {
if err != nil && sh != nil { if err != nil {
// Only handle end stats if err != nil. // Only handle end stats if err != nil.
end := &stats.End{ end := &stats.End{
Client: true, Client: true,
@ -171,6 +182,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
sh.HandleRPC(ctx, end) sh.HandleRPC(ctx, end)
} }
}() }()
}
gopts := BalancerGetOptions{ gopts := BalancerGetOptions{
BlockingWait: !c.failFast, BlockingWait: !c.failFast,
} }
@ -178,7 +190,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
t, put, err = cc.getTransport(ctx, gopts) t, put, err = cc.getTransport(ctx, gopts)
if err != nil { if err != nil {
// TODO(zhaoq): Probably revisit the error handling. // TODO(zhaoq): Probably revisit the error handling.
if _, ok := err.(*rpcError); ok { if _, ok := status.FromError(err); ok {
return nil, err return nil, err
} }
if err == errConnClosing || err == errConnUnavailable { if err == errConnClosing || err == errConnUnavailable {
@ -193,20 +205,27 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
s, err = t.NewStream(ctx, callHdr) s, err = t.NewStream(ctx, callHdr)
if err != nil { if err != nil {
if _, ok := err.(transport.ConnectionError); ok && put != nil {
// If error is connection error, transport was sending data on wire,
// and we are not sure if anything has been sent on wire.
// If error is not connection error, we are sure nothing has been sent.
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
}
if put != nil { if put != nil {
put() put()
put = nil put = nil
} }
if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain { if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
if c.failFast {
return nil, toRPCErr(err)
}
continue continue
} }
return nil, toRPCErr(err) return nil, toRPCErr(err)
} }
break break
} }
// Set callInfo.peer object from stream's context.
if peer, ok := peer.FromContext(s.Context()); ok {
c.peer = peer
}
cs := &clientStream{ cs := &clientStream{
opts: opts, opts: opts,
c: c, c: c,
@ -236,14 +255,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
select { select {
case <-t.Error(): case <-t.Error():
// Incur transport error, simply exit. // Incur transport error, simply exit.
case <-cc.ctx.Done():
cs.finish(ErrClientConnClosing)
cs.closeTransportStream(ErrClientConnClosing)
case <-s.Done(): case <-s.Done():
// TODO: The trace of the RPC is terminated here when there is no pending // TODO: The trace of the RPC is terminated here when there is no pending
// I/O, which is probably not the optimal solution. // I/O, which is probably not the optimal solution.
if s.StatusCode() == codes.OK { cs.finish(s.Status().Err())
cs.finish(nil)
} else {
cs.finish(Errorf(s.StatusCode(), "%s", s.StatusDesc()))
}
cs.closeTransportStream(nil) cs.closeTransportStream(nil)
case <-s.GoAway(): case <-s.GoAway():
cs.finish(errConnDrain) cs.finish(errConnDrain)
@ -276,6 +294,7 @@ type clientStream struct {
mu sync.Mutex mu sync.Mutex
put func() put func()
closed bool closed bool
finished bool
// trInfo.tr is set when the clientStream is created (if EnableTracing is true), // trInfo.tr is set when the clientStream is created (if EnableTracing is true),
// and is set to nil when the clientStream's finish method is called. // and is set to nil when the clientStream's finish method is called.
trInfo traceInfo trInfo traceInfo
@ -350,7 +369,13 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
} }
}() }()
if err != nil { if err != nil {
return Errorf(codes.Internal, "grpc: %v", err) return err
}
if cs.c.maxSendMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
}
if len(out) > *cs.c.maxSendMessageSize {
return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(out), *cs.c.maxSendMessageSize)
} }
err = cs.t.Write(cs.s, out, &transport.Options{Last: false}) err = cs.t.Write(cs.s, out, &transport.Options{Last: false})
if err == nil && outPayload != nil { if err == nil && outPayload != nil {
@ -361,28 +386,16 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
} }
func (cs *clientStream) RecvMsg(m interface{}) (err error) { func (cs *clientStream) RecvMsg(m interface{}) (err error) {
defer func() {
if err != nil && cs.statsHandler != nil {
// Only generate End if err != nil.
// If err == nil, it's not the last RecvMsg.
// The last RecvMsg gets either an RPC error or io.EOF.
end := &stats.End{
Client: true,
EndTime: time.Now(),
}
if err != io.EOF {
end.Error = toRPCErr(err)
}
cs.statsHandler.HandleRPC(cs.statsCtx, end)
}
}()
var inPayload *stats.InPayload var inPayload *stats.InPayload
if cs.statsHandler != nil { if cs.statsHandler != nil {
inPayload = &stats.InPayload{ inPayload = &stats.InPayload{
Client: true, Client: true,
} }
} }
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32, inPayload) if cs.c.maxReceiveMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
}
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload)
defer func() { defer func() {
// err != nil indicates the termination of the stream. // err != nil indicates the termination of the stream.
if err != nil { if err != nil {
@ -405,30 +418,33 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
} }
// Special handling for client streaming rpc. // Special handling for client streaming rpc.
// This recv expects EOF or errors, so we don't collect inPayload. // This recv expects EOF or errors, so we don't collect inPayload.
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32, nil) if cs.c.maxReceiveMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
}
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil)
cs.closeTransportStream(err) cs.closeTransportStream(err)
if err == nil { if err == nil {
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
} }
if err == io.EOF { if err == io.EOF {
if cs.s.StatusCode() == codes.OK { if se := cs.s.Status().Err(); se != nil {
return se
}
cs.finish(err) cs.finish(err)
return nil return nil
} }
return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc())
}
return toRPCErr(err) return toRPCErr(err)
} }
if _, ok := err.(transport.ConnectionError); !ok { if _, ok := err.(transport.ConnectionError); !ok {
cs.closeTransportStream(err) cs.closeTransportStream(err)
} }
if err == io.EOF { if err == io.EOF {
if cs.s.StatusCode() == codes.OK { if statusErr := cs.s.Status().Err(); statusErr != nil {
return statusErr
}
// Returns io.EOF to indicate the end of the stream. // Returns io.EOF to indicate the end of the stream.
return return
} }
return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc())
}
return toRPCErr(err) return toRPCErr(err)
} }
@ -461,20 +477,39 @@ func (cs *clientStream) closeTransportStream(err error) {
} }
func (cs *clientStream) finish(err error) { func (cs *clientStream) finish(err error) {
cs.mu.Lock()
defer cs.mu.Unlock()
if cs.finished {
return
}
cs.finished = true
defer func() { defer func() {
if cs.cancel != nil { if cs.cancel != nil {
cs.cancel() cs.cancel()
} }
}() }()
cs.mu.Lock()
defer cs.mu.Unlock()
for _, o := range cs.opts { for _, o := range cs.opts {
o.after(&cs.c) o.after(&cs.c)
} }
if cs.put != nil { if cs.put != nil {
updateRPCInfoInContext(cs.s.Context(), rpcInfo{
bytesSent: cs.s.BytesSent(),
bytesReceived: cs.s.BytesReceived(),
})
cs.put() cs.put()
cs.put = nil cs.put = nil
} }
if cs.statsHandler != nil {
end := &stats.End{
Client: true,
EndTime: time.Now(),
}
if err != io.EOF {
// end.Error is nil if the RPC finished successfully.
end.Error = toRPCErr(err)
}
cs.statsHandler.HandleRPC(cs.statsCtx, end)
}
if !cs.tracing { if !cs.tracing {
return return
} }
@ -518,9 +553,8 @@ type serverStream struct {
cp Compressor cp Compressor
dc Decompressor dc Decompressor
cbuf *bytes.Buffer cbuf *bytes.Buffer
maxMsgSize int maxReceiveMessageSize int
statusCode codes.Code maxSendMessageSize int
statusDesc string
trInfo *traceInfo trInfo *traceInfo
statsHandler stats.Handler statsHandler stats.Handler
@ -577,9 +611,11 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
} }
}() }()
if err != nil { if err != nil {
err = Errorf(codes.Internal, "grpc: %v", err)
return err return err
} }
if len(out) > ss.maxSendMessageSize {
return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(out), ss.maxSendMessageSize)
}
if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil { if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil {
return toRPCErr(err) return toRPCErr(err)
} }
@ -609,7 +645,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
if ss.statsHandler != nil { if ss.statsHandler != nil {
inPayload = &stats.InPayload{} inPayload = &stats.InPayload{}
} }
if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize, inPayload); err != nil { if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload); err != nil {
if err == io.EOF { if err == io.EOF {
return err return err
} }
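SendMsg and RecvMsg now enforce per-call limits carried on callInfo. A sketch of raising them from the client, assuming the corresponding call options are grpc.MaxCallRecvMsgSize and grpc.MaxCallSendMsgSize supplied through grpc.WithDefaultCallOptions (only the callInfo fields appear in this diff):

package example

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc"
)

// dial sketches raising the client-side limits that back the new
// maxReceiveMessageSize/maxSendMessageSize checks in SendMsg and RecvMsg.
// The option names are assumptions; only the callInfo fields are in this diff.
func dial(ctx context.Context, target string) (*grpc.ClientConn, error) {
	return grpc.DialContext(ctx, target,
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(
			grpc.MaxCallRecvMsgSize(8*1024*1024), // allow larger responses than the default
			grpc.MaxCallSendMsgSize(1*1024*1024), // keep outgoing requests small
		),
	)
}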


@@ -1,33 +1,18 @@
 /*
  *
- * Copyright 2016, Google Inc.
- * All rights reserved.
+ * Copyright 2016 gRPC authors.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
  *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  *
  */

Some files were not shown because too many files have changed in this diff.