update grpc packages
parent 8f51b2fba5
commit 6c01444a8a
@@ -7,7 +7,7 @@ If you are new to github, please start by reading [Pull Request howto](https://h
## Legal requirements

In order to protect both you and ourselves, you will need to sign the
[Contributor License Agreement](https://cla.developers.google.com/clas).
[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf).

## Guidelines for Pull Requests

How to get your contributions merged smoothly and quickly.

@@ -27,6 +27,10 @@ How to get your contributions merged smoothly and quickly.
- Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change).

- **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on.
  - `make all` to test everything, OR
  - `make vet` to catch vet errors
  - `make test` to run the tests
  - `make testrace` to run tests in race mode

- Exceptions to the rules can be made if there's a compelling reason for doing so.
@@ -1,4 +1,4 @@
all: test testrace
all: vet test testrace

deps:
	go get -d -v google.golang.org/grpc/...

@@ -9,6 +9,9 @@ updatedeps:
testdeps:
	go get -d -v -t google.golang.org/grpc/...

testgaedeps:
	goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/...

updatetestdeps:
	go get -d -v -t -u -f google.golang.org/grpc/...

@@ -20,33 +23,33 @@ proto:
		echo "error: protoc not installed" >&2; \
		exit 1; \
	fi
	go get -u -v github.com/golang/protobuf/protoc-gen-go
	# use $$dir as the root for all proto files in the same directory
	for dir in $$(git ls-files '*.proto' | xargs -n1 dirname | uniq); do \
		protoc -I $$dir --go_out=plugins=grpc:$$dir $$dir/*.proto; \
	done
	go generate google.golang.org/grpc/...

vet:
	./vet.sh

test: testdeps
	go test -v -cpu 1,4 google.golang.org/grpc/...
	go test -cpu 1,4 -timeout 5m google.golang.org/grpc/...

testrace: testdeps
	go test -v -race -cpu 1,4 google.golang.org/grpc/...
	go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/...

testappengine: testgaedeps
	goapp test -cpu 1,4 -timeout 5m google.golang.org/grpc/...

clean:
	go clean -i google.golang.org/grpc/...

coverage: testdeps
	./coverage.sh --coveralls

.PHONY: \
	all \
	deps \
	updatedeps \
	testdeps \
	testgaedeps \
	updatetestdeps \
	build \
	proto \
	vet \
	test \
	testrace \
	clean \
	coverage
	clean
@@ -1,6 +1,6 @@
# gRPC-Go

[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)
[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)

The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide.

@@ -10,13 +10,13 @@ Installation
To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run:

```
$ go get google.golang.org/grpc
$ go get -u google.golang.org/grpc
```
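After installing, a minimal client program can serve as a quick sanity check that the package builds and links. The sketch below only dials a connection; the address `localhost:50051` and the use of an insecure, plaintext connection are assumptions for illustration, and generated service stubs from the quick start guide would be layered on top:

```
package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Hypothetical server address; insecure transport is for illustration only.
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	// Generated clients are created from conn, e.g. pb.NewGreeterClient(conn).
}
```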
Prerequisites
-------------

This requires Go 1.6 or later.
This requires Go 1.6 or later. Go 1.7 will be required soon.

Constraints
-----------
@@ -16,83 +16,23 @@
*
|
||||
*/
|
||||
|
||||
// See internal/backoff package for the backoff implementation. This file is
|
||||
// kept for the exported types and API backward compatibility.
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DefaultBackoffConfig uses values specified for backoff in
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
var (
|
||||
DefaultBackoffConfig = BackoffConfig{
|
||||
var DefaultBackoffConfig = BackoffConfig{
|
||||
MaxDelay: 120 * time.Second,
|
||||
baseDelay: 1.0 * time.Second,
|
||||
factor: 1.6,
|
||||
jitter: 0.2,
|
||||
}
|
||||
)
|
||||
|
||||
// backoffStrategy defines the methodology for backing off after a grpc
|
||||
// connection failure.
|
||||
//
|
||||
// This is unexported until the gRPC project decides whether or not to allow
|
||||
// alternative backoff strategies. Once a decision is made, this type and its
|
||||
// method may be exported.
|
||||
type backoffStrategy interface {
|
||||
// backoff returns the amount of time to wait before the next retry given
|
||||
// the number of consecutive failures.
|
||||
backoff(retries int) time.Duration
|
||||
}
|
||||
|
||||
// BackoffConfig defines the parameters for the default gRPC backoff strategy.
|
||||
type BackoffConfig struct {
|
||||
// MaxDelay is the upper bound of backoff delay.
|
||||
MaxDelay time.Duration
|
||||
|
||||
// TODO(stevvooe): The following fields are not exported, as allowing
|
||||
// changes would violate the current gRPC specification for backoff. If
|
||||
// gRPC decides to allow more interesting backoff strategies, these fields
|
||||
// may be opened up in the future.
|
||||
|
||||
// baseDelay is the amount of time to wait before retrying after the first
|
||||
// failure.
|
||||
baseDelay time.Duration
|
||||
|
||||
// factor is applied to the backoff after each retry.
|
||||
factor float64
|
||||
|
||||
// jitter provides a range to randomize backoff delays.
|
||||
jitter float64
|
||||
}
|
||||
|
||||
func setDefaults(bc *BackoffConfig) {
|
||||
md := bc.MaxDelay
|
||||
*bc = DefaultBackoffConfig
|
||||
|
||||
if md > 0 {
|
||||
bc.MaxDelay = md
|
||||
}
|
||||
}
|
||||
|
||||
func (bc BackoffConfig) backoff(retries int) time.Duration {
|
||||
if retries == 0 {
|
||||
return bc.baseDelay
|
||||
}
|
||||
backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay)
|
||||
for backoff < max && retries > 0 {
|
||||
backoff *= bc.factor
|
||||
retries--
|
||||
}
|
||||
if backoff > max {
|
||||
backoff = max
|
||||
}
|
||||
// Randomize backoff delays so that if a cluster of requests start at
|
||||
// the same time, they won't operate in lockstep.
|
||||
backoff *= 1 + bc.jitter*(rand.Float64()*2-1)
|
||||
if backoff < 0 {
|
||||
return 0
|
||||
}
|
||||
return time.Duration(backoff)
|
||||
}
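To make the growth of these delays concrete, here is a small, self-contained sketch (not part of the package) that re-implements the same formula with the DefaultBackoffConfig values and prints the approximate delay per retry; the jitter term randomizes each delay by roughly ±20%:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoffDelay mirrors BackoffConfig.backoff above, using the values from
// DefaultBackoffConfig, purely to show how the delay grows per retry.
func backoffDelay(retries int) time.Duration {
	const (
		baseDelay = 1.0 * float64(time.Second)
		maxDelay  = 120 * float64(time.Second)
		factor    = 1.6
		jitter    = 0.2
	)
	if retries == 0 {
		return time.Duration(baseDelay)
	}
	backoff := baseDelay
	for backoff < maxDelay && retries > 0 {
		backoff *= factor
		retries--
	}
	if backoff > maxDelay {
		backoff = maxDelay
	}
	// Randomize so callers retrying at the same time don't run in lockstep.
	backoff *= 1 + jitter*(rand.Float64()*2-1)
	return time.Duration(backoff)
}

func main() {
	for i := 0; i <= 5; i++ {
		fmt.Printf("retry %d: ~%v\n", i, backoffDelay(i))
	}
}
```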
@@ -28,10 +28,12 @@ import (
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/naming"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// Address represents a server the client connects to.
|
||||
// This is the EXPERIMENTAL API and may be changed or extended in the future.
|
||||
//
|
||||
// Deprecated: please use package balancer.
|
||||
type Address struct {
|
||||
// Addr is the server address on which a connection will be established.
|
||||
Addr string
|
||||
|
@@ -41,6 +43,8 @@ type Address struct {
}
|
||||
|
||||
// BalancerConfig specifies the configurations for Balancer.
|
||||
//
|
||||
// Deprecated: please use package balancer.
|
||||
type BalancerConfig struct {
|
||||
// DialCreds is the transport credential the Balancer implementation can
|
||||
// use to dial to a remote load balancer server. The Balancer implementations
|
||||
|
@@ -53,7 +57,8 @@ type BalancerConfig struct {
}
|
||||
|
||||
// BalancerGetOptions configures a Get call.
|
||||
// This is the EXPERIMENTAL API and may be changed or extended in the future.
|
||||
//
|
||||
// Deprecated: please use package balancer.
|
||||
type BalancerGetOptions struct {
|
||||
// BlockingWait specifies whether Get should block when there is no
|
||||
// connected address.
|
||||
|
@@ -61,7 +66,8 @@ type BalancerGetOptions struct {
}
|
||||
|
||||
// Balancer chooses network addresses for RPCs.
|
||||
// This is the EXPERIMENTAL API and may be changed or extended in the future.
|
||||
//
|
||||
// Deprecated: please use package balancer.
|
||||
type Balancer interface {
|
||||
// Start does the initialization work to bootstrap a Balancer. For example,
|
||||
// this function may start the name resolution and watch the updates. It will
|
||||
|
@@ -134,6 +140,8 @@ func downErrorf(timeout, temporary bool, format string, a ...interface{}) downEr
|
||||
// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
|
||||
// the name resolution updates and updates the addresses available correspondingly.
|
||||
//
|
||||
// Deprecated: please use package balancer/roundrobin.
|
||||
func RoundRobin(r naming.Resolver) Balancer {
|
||||
return &roundRobin{r: r}
|
||||
}
|
||||
|
@@ -310,7 +318,7 @@ func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Ad
if !opts.BlockingWait {
|
||||
if len(rr.addrs) == 0 {
|
||||
rr.mu.Unlock()
|
||||
err = Errorf(codes.Unavailable, "there is no address available")
|
||||
err = status.Errorf(codes.Unavailable, "there is no address available")
|
||||
return
|
||||
}
|
||||
// Returns the next addr on rr.addrs for failfast RPCs.
|
||||
|
@@ -395,3 +403,14 @@ func (rr *roundRobin) Close() error {
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn.
|
||||
// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get()
|
||||
// returns the only address Up by resetTransport().
|
||||
type pickFirst struct {
|
||||
*roundRobin
|
||||
}
|
||||
|
||||
func pickFirstBalancerV1(r naming.Resolver) Balancer {
|
||||
return &pickFirst{&roundRobin{r: r}}
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,274 @@
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package balancer defines APIs for load balancing in gRPC.
|
||||
// All APIs in this package are experimental.
|
||||
package balancer
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
var (
|
||||
// m is a map from name to balancer builder.
|
||||
m = make(map[string]Builder)
|
||||
)
|
||||
|
||||
// Register registers the balancer builder to the balancer map. b.Name
|
||||
// (lowercased) will be used as the name registered with this builder.
|
||||
//
|
||||
// NOTE: this function must only be called during initialization time (i.e. in
|
||||
// an init() function), and is not thread-safe. If multiple Balancers are
|
||||
// registered with the same name, the one registered last will take effect.
|
||||
func Register(b Builder) {
|
||||
m[strings.ToLower(b.Name())] = b
|
||||
}
|
||||
|
||||
// Get returns the resolver builder registered with the given name.
|
||||
// Note that the compare is done in a case-insensitive fashion.
// If no builder is registered with the name, nil will be returned.
|
||||
func Get(name string) Builder {
|
||||
if b, ok := m[strings.ToLower(name)]; ok {
|
||||
return b
|
||||
}
|
||||
return nil
|
||||
}
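A minimal sketch of how Register and Get are meant to be used from a custom balancer package; the package name, the builder name "example", and the no-op balancer are assumptions made purely for illustration:

```go
package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/resolver"
)

// exampleBalancer is a do-nothing Balancer used only to make the sketch compile.
type exampleBalancer struct{ cc balancer.ClientConn }

func (*exampleBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {}
func (*exampleBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error)            {}
func (*exampleBalancer) Close()                                                             {}

type exampleBuilder struct{}

func (exampleBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
	return &exampleBalancer{cc: cc}
}

func (exampleBuilder) Name() string { return "example" }

func init() {
	// Register must only be called from an init function; the name is lowercased.
	balancer.Register(exampleBuilder{})
}

// Elsewhere, the builder can be looked up by name (case-insensitive):
//
//	b := balancer.Get("example") // nil if nothing is registered under "example"
```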
|
||||
// SubConn represents a gRPC sub connection.
|
||||
// Each sub connection contains a list of addresses. gRPC will
|
||||
// try to connect to them (in sequence), and stop trying the
|
||||
// remainder once one connection is successful.
|
||||
//
|
||||
// The reconnect backoff will be applied on the list, not a single address.
|
||||
// For example, try_on_all_addresses -> backoff -> try_on_all_addresses.
|
||||
//
|
||||
// All SubConns start in IDLE, and will not try to connect. To trigger
|
||||
// the connecting, Balancers must call Connect.
|
||||
// When the connection encounters an error, it will reconnect immediately.
|
||||
// When the connection becomes IDLE, it will not reconnect unless Connect is
|
||||
// called.
|
||||
//
|
||||
// This interface is to be implemented by gRPC. Users should not need a
|
||||
// brand new implementation of this interface. For the situations like
|
||||
// testing, the new implementation should embed this interface. This allows
|
||||
// gRPC to add new methods to this interface.
|
||||
type SubConn interface {
|
||||
// UpdateAddresses updates the addresses used in this SubConn.
|
||||
// gRPC checks if currently-connected address is still in the new list.
|
||||
// If it's in the list, the connection will be kept.
|
||||
// If it's not in the list, the connection will be gracefully closed, and
|
||||
// a new connection will be created.
|
||||
//
|
||||
// This will trigger a state transition for the SubConn.
|
||||
UpdateAddresses([]resolver.Address)
|
||||
// Connect starts the connecting for this SubConn.
|
||||
Connect()
|
||||
}
|
||||
|
||||
// NewSubConnOptions contains options to create new SubConn.
|
||||
type NewSubConnOptions struct{}
|
||||
|
||||
// ClientConn represents a gRPC ClientConn.
|
||||
//
|
||||
// This interface is to be implemented by gRPC. Users should not need a
|
||||
// brand new implementation of this interface. For the situations like
|
||||
// testing, the new implementation should embed this interface. This allows
|
||||
// gRPC to add new methods to this interface.
|
||||
type ClientConn interface {
|
||||
// NewSubConn is called by balancer to create a new SubConn.
|
||||
// It doesn't block and wait for the connections to be established.
|
||||
// Behaviors of the SubConn can be controlled by options.
|
||||
NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error)
|
||||
// RemoveSubConn removes the SubConn from ClientConn.
|
||||
// The SubConn will be shutdown.
|
||||
RemoveSubConn(SubConn)
|
||||
|
||||
// UpdateBalancerState is called by balancer to notify gRPC that some internal
|
||||
// state in balancer has changed.
|
||||
//
|
||||
// gRPC will update the connectivity state of the ClientConn, and will call pick
|
||||
// on the new picker to pick new SubConn.
|
||||
UpdateBalancerState(s connectivity.State, p Picker)
|
||||
|
||||
// ResolveNow is called by balancer to notify gRPC to do a name resolving.
|
||||
ResolveNow(resolver.ResolveNowOption)
|
||||
|
||||
// Target returns the dial target for this ClientConn.
|
||||
Target() string
|
||||
}
|
||||
|
||||
// BuildOptions contains additional information for Build.
|
||||
type BuildOptions struct {
|
||||
// DialCreds is the transport credential the Balancer implementation can
|
||||
// use to dial to a remote load balancer server. The Balancer implementations
|
||||
// can ignore this if it does not need to talk to another party securely.
|
||||
DialCreds credentials.TransportCredentials
|
||||
// Dialer is the custom dialer the Balancer implementation can use to dial
|
||||
// to a remote load balancer server. The Balancer implementations
|
||||
// can ignore this if it doesn't need to talk to remote balancer.
|
||||
Dialer func(context.Context, string) (net.Conn, error)
|
||||
// ChannelzParentID is the entity parent's channelz unique identification number.
|
||||
ChannelzParentID int64
|
||||
}
|
||||
|
||||
// Builder creates a balancer.
|
||||
type Builder interface {
|
||||
// Build creates a new balancer with the ClientConn.
|
||||
Build(cc ClientConn, opts BuildOptions) Balancer
|
||||
// Name returns the name of balancers built by this builder.
|
||||
// It will be used to pick balancers (for example in service config).
|
||||
Name() string
|
||||
}
|
||||
|
||||
// PickOptions contains addition information for the Pick operation.
|
||||
type PickOptions struct {
|
||||
// FullMethodName is the method name that NewClientStream() is called
|
||||
// with. The canonical format is /service/Method.
|
||||
FullMethodName string
|
||||
}
|
||||
|
||||
// DoneInfo contains additional information for done.
|
||||
type DoneInfo struct {
|
||||
// Err is the rpc error the RPC finished with. It could be nil.
|
||||
Err error
|
||||
// BytesSent indicates if any bytes have been sent to the server.
|
||||
BytesSent bool
|
||||
// BytesReceived indicates if any byte has been received from the server.
|
||||
BytesReceived bool
|
||||
}
|
||||
|
||||
var (
|
||||
// ErrNoSubConnAvailable indicates no SubConn is available for pick().
|
||||
// gRPC will block the RPC until a new picker is available via UpdateBalancerState().
|
||||
ErrNoSubConnAvailable = errors.New("no SubConn is available")
|
||||
// ErrTransientFailure indicates all SubConns are in TransientFailure.
|
||||
// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
|
||||
ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
|
||||
)
|
||||
|
||||
// Picker is used by gRPC to pick a SubConn to send an RPC.
|
||||
// Balancer is expected to generate a new picker from its snapshot every time its
|
||||
// internal state has changed.
|
||||
//
|
||||
// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
|
||||
type Picker interface {
|
||||
// Pick returns the SubConn to be used to send the RPC.
|
||||
// The returned SubConn must be one returned by NewSubConn().
|
||||
//
|
||||
// This function is expected to return:
|
||||
// - a SubConn that is known to be READY;
|
||||
// - ErrNoSubConnAvailable if no SubConn is available, but progress is being
|
||||
// made (for example, some SubConn is in CONNECTING mode);
|
||||
// - other errors if no active connecting is happening (for example, all SubConn
|
||||
// are in TRANSIENT_FAILURE mode).
|
||||
//
|
||||
// If a SubConn is returned:
|
||||
// - If it is READY, gRPC will send the RPC on it;
|
||||
// - If it is not ready, or becomes not ready after it's returned, gRPC will block
|
||||
// until UpdateBalancerState() is called and will call pick on the new picker.
|
||||
//
|
||||
// If the returned error is not nil:
|
||||
// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
|
||||
// - If the error is ErrTransientFailure:
|
||||
// - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
|
||||
// is called to pick again;
|
||||
// - Otherwise, RPC will fail with unavailable error.
|
||||
// - Else (error is other non-nil error):
|
||||
// - The RPC will fail with unavailable error.
|
||||
//
|
||||
// The returned done() function will be called once the rpc has finished, with the
|
||||
// final status of that RPC.
|
||||
// done may be nil if balancer doesn't care about the RPC status.
|
||||
Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error)
|
||||
}
|
||||
|
||||
// Balancer takes input from gRPC, manages SubConns, and collects and aggregates
|
||||
// the connectivity states.
|
||||
//
|
||||
// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs.
|
||||
//
|
||||
// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed
|
||||
// to be called synchronously from the same goroutine.
|
||||
// There's no guarantee on picker.Pick, it may be called anytime.
|
||||
type Balancer interface {
|
||||
// HandleSubConnStateChange is called by gRPC when the connectivity state
|
||||
// of sc has changed.
|
||||
// Balancer is expected to aggregate all the state of SubConn and report
|
||||
// that back to gRPC.
|
||||
// Balancer should also generate and update Pickers when its internal state has
|
||||
// been changed by the new state.
|
||||
HandleSubConnStateChange(sc SubConn, state connectivity.State)
|
||||
// HandleResolvedAddrs is called by gRPC to send updated resolved addresses to
|
||||
// balancers.
|
||||
// Balancer can create new SubConn or remove SubConn with the addresses.
|
||||
// An empty address slice and a non-nil error will be passed if the resolver returns
|
||||
// non-nil error to gRPC.
|
||||
HandleResolvedAddrs([]resolver.Address, error)
|
||||
// Close closes the balancer. The balancer is not required to call
|
||||
// ClientConn.RemoveSubConn for its existing SubConns.
|
||||
Close()
|
||||
}
|
||||
|
||||
// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
|
||||
// and returns one aggregated connectivity state.
|
||||
//
|
||||
// It's not thread safe.
|
||||
type ConnectivityStateEvaluator struct {
|
||||
numReady uint64 // Number of addrConns in ready state.
|
||||
numConnecting uint64 // Number of addrConns in connecting state.
|
||||
numTransientFailure uint64 // Number of addrConns in transientFailure.
|
||||
}
|
||||
|
||||
// RecordTransition records state change happening in subConn and based on that
|
||||
// it evaluates what aggregated state should be.
|
||||
//
|
||||
// - If at least one SubConn in Ready, the aggregated state is Ready;
|
||||
// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
|
||||
// - Else the aggregated state is TransientFailure.
|
||||
//
|
||||
// Idle and Shutdown are not considered.
|
||||
func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
|
||||
// Update counters.
|
||||
for idx, state := range []connectivity.State{oldState, newState} {
|
||||
updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
|
||||
switch state {
|
||||
case connectivity.Ready:
|
||||
cse.numReady += updateVal
|
||||
case connectivity.Connecting:
|
||||
cse.numConnecting += updateVal
|
||||
case connectivity.TransientFailure:
|
||||
cse.numTransientFailure += updateVal
|
||||
}
|
||||
}
|
||||
|
||||
// Evaluate.
|
||||
if cse.numReady > 0 {
|
||||
return connectivity.Ready
|
||||
}
|
||||
if cse.numConnecting > 0 {
|
||||
return connectivity.Connecting
|
||||
}
|
||||
return connectivity.TransientFailure
|
||||
}
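As a small worked example of the aggregation rule above, the following hypothetical sequence tracks two SubConns through their lifecycle and prints the aggregate state after each transition (the two-SubConn scenario is an assumption for illustration):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func main() {
	cse := &balancer.ConnectivityStateEvaluator{}

	// Two SubConns leave Idle and start connecting: aggregate is Connecting.
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.Connecting))
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.Connecting))

	// One of them becomes Ready: aggregate flips to Ready.
	fmt.Println(cse.RecordTransition(connectivity.Connecting, connectivity.Ready))

	// It fails while the other is still Connecting: aggregate is Connecting again.
	fmt.Println(cse.RecordTransition(connectivity.Ready, connectivity.TransientFailure))

	// The last one fails as well: aggregate becomes TransientFailure.
	fmt.Println(cse.RecordTransition(connectivity.Connecting, connectivity.TransientFailure))
}
```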
@@ -0,0 +1,208 @@
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package base
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
type baseBuilder struct {
|
||||
name string
|
||||
pickerBuilder PickerBuilder
|
||||
}
|
||||
|
||||
func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
|
||||
return &baseBalancer{
|
||||
cc: cc,
|
||||
pickerBuilder: bb.pickerBuilder,
|
||||
|
||||
subConns: make(map[resolver.Address]balancer.SubConn),
|
||||
scStates: make(map[balancer.SubConn]connectivity.State),
|
||||
csEvltr: &connectivityStateEvaluator{},
|
||||
// Initialize picker to a picker that always return
|
||||
// ErrNoSubConnAvailable, because when state of a SubConn changes, we
|
||||
// may call UpdateBalancerState with this picker.
|
||||
picker: NewErrPicker(balancer.ErrNoSubConnAvailable),
|
||||
}
|
||||
}
|
||||
|
||||
func (bb *baseBuilder) Name() string {
|
||||
return bb.name
|
||||
}
|
||||
|
||||
type baseBalancer struct {
|
||||
cc balancer.ClientConn
|
||||
pickerBuilder PickerBuilder
|
||||
|
||||
csEvltr *connectivityStateEvaluator
|
||||
state connectivity.State
|
||||
|
||||
subConns map[resolver.Address]balancer.SubConn
|
||||
scStates map[balancer.SubConn]connectivity.State
|
||||
picker balancer.Picker
|
||||
}
|
||||
|
||||
func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
|
||||
if err != nil {
|
||||
grpclog.Infof("base.baseBalancer: HandleResolvedAddrs called with error %v", err)
|
||||
return
|
||||
}
|
||||
grpclog.Infoln("base.baseBalancer: got new resolved addresses: ", addrs)
|
||||
// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
|
||||
addrsSet := make(map[resolver.Address]struct{})
|
||||
for _, a := range addrs {
|
||||
addrsSet[a] = struct{}{}
|
||||
if _, ok := b.subConns[a]; !ok {
|
||||
// a is a new address (not existing in b.subConns).
|
||||
sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
|
||||
if err != nil {
|
||||
grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
|
||||
continue
|
||||
}
|
||||
b.subConns[a] = sc
|
||||
b.scStates[sc] = connectivity.Idle
|
||||
sc.Connect()
|
||||
}
|
||||
}
|
||||
for a, sc := range b.subConns {
|
||||
// a was removed by resolver.
|
||||
if _, ok := addrsSet[a]; !ok {
|
||||
b.cc.RemoveSubConn(sc)
|
||||
delete(b.subConns, a)
|
||||
// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
|
||||
// The entry will be deleted in HandleSubConnStateChange.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// regeneratePicker takes a snapshot of the balancer, and generates a picker
|
||||
// from it. The picker is
|
||||
// - errPicker with ErrTransientFailure if the balancer is in TransientFailure,
|
||||
// - built by the pickerBuilder with all READY SubConns otherwise.
|
||||
func (b *baseBalancer) regeneratePicker() {
|
||||
if b.state == connectivity.TransientFailure {
|
||||
b.picker = NewErrPicker(balancer.ErrTransientFailure)
|
||||
return
|
||||
}
|
||||
readySCs := make(map[resolver.Address]balancer.SubConn)
|
||||
|
||||
// Filter out all ready SCs from full subConn map.
|
||||
for addr, sc := range b.subConns {
|
||||
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
|
||||
readySCs[addr] = sc
|
||||
}
|
||||
}
|
||||
b.picker = b.pickerBuilder.Build(readySCs)
|
||||
}
|
||||
|
||||
func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
|
||||
grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
|
||||
oldS, ok := b.scStates[sc]
|
||||
if !ok {
|
||||
grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
|
||||
return
|
||||
}
|
||||
b.scStates[sc] = s
|
||||
switch s {
|
||||
case connectivity.Idle:
|
||||
sc.Connect()
|
||||
case connectivity.Shutdown:
|
||||
// When an address was removed by resolver, b called RemoveSubConn but
|
||||
// kept the sc's state in scStates. Remove state for this sc here.
|
||||
delete(b.scStates, sc)
|
||||
}
|
||||
|
||||
oldAggrState := b.state
|
||||
b.state = b.csEvltr.recordTransition(oldS, s)
|
||||
|
||||
// Regenerate picker when one of the following happens:
|
||||
// - this sc became ready from not-ready
|
||||
// - this sc became not-ready from ready
|
||||
// - the aggregated state of balancer became TransientFailure from non-TransientFailure
|
||||
// - the aggregated state of balancer became non-TransientFailure from TransientFailure
|
||||
if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
|
||||
(b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
|
||||
b.regeneratePicker()
|
||||
}
|
||||
|
||||
b.cc.UpdateBalancerState(b.state, b.picker)
|
||||
}
|
||||
|
||||
// Close is a nop because base balancer doesn't have internal state to clean up,
|
||||
// and it doesn't need to call RemoveSubConn for the SubConns.
|
||||
func (b *baseBalancer) Close() {
|
||||
}
|
||||
|
||||
// NewErrPicker returns a picker that always returns err on Pick().
|
||||
func NewErrPicker(err error) balancer.Picker {
|
||||
return &errPicker{err: err}
|
||||
}
|
||||
|
||||
type errPicker struct {
|
||||
err error // Pick() always returns this err.
|
||||
}
|
||||
|
||||
func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
|
||||
return nil, nil, p.err
|
||||
}
|
||||
|
||||
// connectivityStateEvaluator gets updated by addrConns when their
|
||||
// states transition, based on which it evaluates the state of
|
||||
// ClientConn.
|
||||
type connectivityStateEvaluator struct {
|
||||
numReady uint64 // Number of addrConns in ready state.
|
||||
numConnecting uint64 // Number of addrConns in connecting state.
|
||||
numTransientFailure uint64 // Number of addrConns in transientFailure.
|
||||
}
|
||||
|
||||
// recordTransition records state change happening in every subConn and based on
|
||||
// that it evaluates what aggregated state should be.
|
||||
// It can only transition between Ready, Connecting and TransientFailure. Other states,
|
||||
// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection
|
||||
// before any subConn is created ClientConn is in idle state. In the end when ClientConn
|
||||
// closes it is in Shutdown state.
|
||||
//
|
||||
// recordTransition should only be called synchronously from the same goroutine.
|
||||
func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
|
||||
// Update counters.
|
||||
for idx, state := range []connectivity.State{oldState, newState} {
|
||||
updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
|
||||
switch state {
|
||||
case connectivity.Ready:
|
||||
cse.numReady += updateVal
|
||||
case connectivity.Connecting:
|
||||
cse.numConnecting += updateVal
|
||||
case connectivity.TransientFailure:
|
||||
cse.numTransientFailure += updateVal
|
||||
}
|
||||
}
|
||||
|
||||
// Evaluate.
|
||||
if cse.numReady > 0 {
|
||||
return connectivity.Ready
|
||||
}
|
||||
if cse.numConnecting > 0 {
|
||||
return connectivity.Connecting
|
||||
}
|
||||
return connectivity.TransientFailure
|
||||
}
|
|
@@ -0,0 +1,52 @@
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package base defines a balancer base that can be used to build balancers with
|
||||
// different picking algorithms.
|
||||
//
|
||||
// The base balancer creates a new SubConn for each resolved address. The
|
||||
// provided picker will only be notified about READY SubConns.
|
||||
//
|
||||
// This package is the base of round_robin balancer, its purpose is to be used
|
||||
// to build round_robin like balancers with complex picking algorithms.
|
||||
// Balancers with more complicated logic should try to implement a balancer
|
||||
// builder from scratch.
|
||||
//
|
||||
// All APIs in this package are experimental.
|
||||
package base
|
||||
|
||||
import (
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
// PickerBuilder creates balancer.Picker.
|
||||
type PickerBuilder interface {
|
||||
// Build takes a slice of ready SubConns, and returns a picker that will be
|
||||
// used by gRPC to pick a SubConn.
|
||||
Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
|
||||
}
|
||||
|
||||
// NewBalancerBuilder returns a balancer builder. The balancers
|
||||
// built by this builder will use the picker builder to build pickers.
|
||||
func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
|
||||
return &baseBuilder{
|
||||
name: name,
|
||||
pickerBuilder: pb,
|
||||
}
|
||||
}
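To illustrate how this package is intended to be used, below is a sketch of a hypothetical "first_ready" policy built from a custom PickerBuilder; the package name, policy name, and picking rule are assumptions for illustration, and the round_robin balancer in the next file is the real in-tree example of the same pattern:

```go
package firstready

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/resolver"
)

// Name is the hypothetical policy name used when registering the builder.
const Name = "first_ready"

func init() {
	balancer.Register(base.NewBalancerBuilder(Name, &firstReadyPickerBuilder{}))
}

type firstReadyPickerBuilder struct{}

// Build receives only READY SubConns from the base balancer and returns a
// picker that always uses one of them.
func (*firstReadyPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
	if len(readySCs) == 0 {
		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
	}
	var sc balancer.SubConn
	for _, s := range readySCs {
		sc = s
		break
	}
	return &firstReadyPicker{sc: sc}
}

type firstReadyPicker struct{ sc balancer.SubConn }

func (p *firstReadyPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	return p.sc, nil, nil
}
```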
@@ -0,0 +1,79 @@
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is
|
||||
// installed as one of the default balancers in gRPC; users don't need to
|
||||
// explicitly install this balancer.
|
||||
package roundrobin
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/balancer/base"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
// Name is the name of round_robin balancer.
|
||||
const Name = "round_robin"
|
||||
|
||||
// newBuilder creates a new roundrobin balancer builder.
|
||||
func newBuilder() balancer.Builder {
|
||||
return base.NewBalancerBuilder(Name, &rrPickerBuilder{})
|
||||
}
|
||||
|
||||
func init() {
|
||||
balancer.Register(newBuilder())
|
||||
}
|
||||
|
||||
type rrPickerBuilder struct{}
|
||||
|
||||
func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
|
||||
grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs)
|
||||
var scs []balancer.SubConn
|
||||
for _, sc := range readySCs {
|
||||
scs = append(scs, sc)
|
||||
}
|
||||
return &rrPicker{
|
||||
subConns: scs,
|
||||
}
|
||||
}
|
||||
|
||||
type rrPicker struct {
|
||||
// subConns is the snapshot of the roundrobin balancer when this picker was
|
||||
// created. The slice is immutable. Each Get() will do a round robin
|
||||
// selection from it and return the selected SubConn.
|
||||
subConns []balancer.SubConn
|
||||
|
||||
mu sync.Mutex
|
||||
next int
|
||||
}
|
||||
|
||||
func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
|
||||
if len(p.subConns) <= 0 {
|
||||
return nil, nil, balancer.ErrNoSubConnAvailable
|
||||
}
|
||||
|
||||
p.mu.Lock()
|
||||
sc := p.subConns[p.next]
|
||||
p.next = (p.next + 1) % len(p.subConns)
|
||||
p.mu.Unlock()
|
||||
return sc, nil, nil
|
||||
}
|
|
@@ -0,0 +1,300 @@
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
// scStateUpdate contains the subConn and the new state it changed to.
|
||||
type scStateUpdate struct {
|
||||
sc balancer.SubConn
|
||||
state connectivity.State
|
||||
}
|
||||
|
||||
// scStateUpdateBuffer is an unbounded channel for scStateChangeTuple.
|
||||
// TODO make a general purpose buffer that uses interface{}.
|
||||
type scStateUpdateBuffer struct {
|
||||
c chan *scStateUpdate
|
||||
mu sync.Mutex
|
||||
backlog []*scStateUpdate
|
||||
}
|
||||
|
||||
func newSCStateUpdateBuffer() *scStateUpdateBuffer {
|
||||
return &scStateUpdateBuffer{
|
||||
c: make(chan *scStateUpdate, 1),
|
||||
}
|
||||
}
|
||||
|
||||
func (b *scStateUpdateBuffer) put(t *scStateUpdate) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if len(b.backlog) == 0 {
|
||||
select {
|
||||
case b.c <- t:
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.backlog = append(b.backlog, t)
|
||||
}
|
||||
|
||||
func (b *scStateUpdateBuffer) load() {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if len(b.backlog) > 0 {
|
||||
select {
|
||||
case b.c <- b.backlog[0]:
|
||||
b.backlog[0] = nil
|
||||
b.backlog = b.backlog[1:]
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// get returns the channel that the scStateUpdate will be sent to.
|
||||
//
|
||||
// Upon receiving, the caller should call load to send another
|
||||
// scStateChangeTuple onto the channel if there is any.
|
||||
func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate {
|
||||
return b.c
|
||||
}
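The put/load/get methods above form a small unbounded-buffer pattern: put never blocks the producer, and the consumer calls load after each receive to pull the next backlogged item onto the channel. A stripped-down, self-contained version of the same idea (using strings instead of *scStateUpdate, purely for illustration) looks like this:

```go
package main

import (
	"fmt"
	"sync"
)

// buffer is a generic version of the scStateUpdateBuffer pattern above: a
// one-slot channel backed by an unbounded backlog, so put never blocks.
type buffer struct {
	c       chan string
	mu      sync.Mutex
	backlog []string
}

func newBuffer() *buffer { return &buffer{c: make(chan string, 1)} }

func (b *buffer) put(v string) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) == 0 {
		select {
		case b.c <- v: // channel slot free, deliver directly
			return
		default:
		}
	}
	b.backlog = append(b.backlog, v)
}

func (b *buffer) load() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog = b.backlog[1:]
		default:
		}
	}
}

func main() {
	b := newBuffer()
	// The producer can put any number of updates without blocking.
	for i := 1; i <= 3; i++ {
		b.put(fmt.Sprintf("update-%d", i))
	}
	// The consumer receives, then calls load() to move the next backlog entry
	// onto the channel, just as watcher() does below.
	for i := 0; i < 3; i++ {
		fmt.Println(<-b.c)
		b.load()
	}
}
```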
|
||||
// resolverUpdate contains the new resolved addresses or error if there's
|
||||
// any.
|
||||
type resolverUpdate struct {
|
||||
addrs []resolver.Address
|
||||
err error
|
||||
}
|
||||
|
||||
// ccBalancerWrapper is a wrapper on top of cc for balancers.
|
||||
// It implements balancer.ClientConn interface.
|
||||
type ccBalancerWrapper struct {
|
||||
cc *ClientConn
|
||||
balancer balancer.Balancer
|
||||
stateChangeQueue *scStateUpdateBuffer
|
||||
resolverUpdateCh chan *resolverUpdate
|
||||
done chan struct{}
|
||||
|
||||
mu sync.Mutex
|
||||
subConns map[*acBalancerWrapper]struct{}
|
||||
}
|
||||
|
||||
func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
|
||||
ccb := &ccBalancerWrapper{
|
||||
cc: cc,
|
||||
stateChangeQueue: newSCStateUpdateBuffer(),
|
||||
resolverUpdateCh: make(chan *resolverUpdate, 1),
|
||||
done: make(chan struct{}),
|
||||
subConns: make(map[*acBalancerWrapper]struct{}),
|
||||
}
|
||||
go ccb.watcher()
|
||||
ccb.balancer = b.Build(ccb, bopts)
|
||||
return ccb
|
||||
}
|
||||
|
||||
// watcher calls balancer functions sequentially, so the balancer can be
// implemented lock-free.
|
||||
func (ccb *ccBalancerWrapper) watcher() {
|
||||
for {
|
||||
select {
|
||||
case t := <-ccb.stateChangeQueue.get():
|
||||
ccb.stateChangeQueue.load()
|
||||
select {
|
||||
case <-ccb.done:
|
||||
ccb.balancer.Close()
|
||||
return
|
||||
default:
|
||||
}
|
||||
ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
|
||||
case t := <-ccb.resolverUpdateCh:
|
||||
select {
|
||||
case <-ccb.done:
|
||||
ccb.balancer.Close()
|
||||
return
|
||||
default:
|
||||
}
|
||||
ccb.balancer.HandleResolvedAddrs(t.addrs, t.err)
|
||||
case <-ccb.done:
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ccb.done:
|
||||
ccb.balancer.Close()
|
||||
ccb.mu.Lock()
|
||||
scs := ccb.subConns
|
||||
ccb.subConns = nil
|
||||
ccb.mu.Unlock()
|
||||
for acbw := range scs {
|
||||
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
|
||||
}
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) close() {
|
||||
close(ccb.done)
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
|
||||
// When updating addresses for a SubConn, if the address in use is not in
|
||||
// the new addresses, the old ac will be tearDown() and a new ac will be
|
||||
// created. tearDown() generates a state change with Shutdown state, we
|
||||
// don't want the balancer to receive this state change. So before
|
||||
// tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and
|
||||
// this function will be called with (nil, Shutdown). We don't need to call
|
||||
// balancer method in this case.
|
||||
if sc == nil {
|
||||
return
|
||||
}
|
||||
ccb.stateChangeQueue.put(&scStateUpdate{
|
||||
sc: sc,
|
||||
state: s,
|
||||
})
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) {
|
||||
select {
|
||||
case <-ccb.resolverUpdateCh:
|
||||
default:
|
||||
}
|
||||
ccb.resolverUpdateCh <- &resolverUpdate{
|
||||
addrs: addrs,
|
||||
err: err,
|
||||
}
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
|
||||
if len(addrs) <= 0 {
|
||||
return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
|
||||
}
|
||||
ccb.mu.Lock()
|
||||
defer ccb.mu.Unlock()
|
||||
if ccb.subConns == nil {
|
||||
return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
|
||||
}
|
||||
ac, err := ccb.cc.newAddrConn(addrs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
acbw := &acBalancerWrapper{ac: ac}
|
||||
acbw.ac.mu.Lock()
|
||||
ac.acbw = acbw
|
||||
acbw.ac.mu.Unlock()
|
||||
ccb.subConns[acbw] = struct{}{}
|
||||
return acbw, nil
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
|
||||
acbw, ok := sc.(*acBalancerWrapper)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
ccb.mu.Lock()
|
||||
defer ccb.mu.Unlock()
|
||||
if ccb.subConns == nil {
|
||||
return
|
||||
}
|
||||
delete(ccb.subConns, acbw)
|
||||
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
|
||||
ccb.mu.Lock()
|
||||
defer ccb.mu.Unlock()
|
||||
if ccb.subConns == nil {
|
||||
return
|
||||
}
|
||||
ccb.cc.csMgr.updateState(s)
|
||||
ccb.cc.blockingpicker.updatePicker(p)
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) {
|
||||
ccb.cc.resolveNow(o)
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) Target() string {
|
||||
return ccb.cc.target
|
||||
}
|
||||
|
||||
// acBalancerWrapper is a wrapper on top of ac for balancers.
|
||||
// It implements balancer.SubConn interface.
|
||||
type acBalancerWrapper struct {
|
||||
mu sync.Mutex
|
||||
ac *addrConn
|
||||
}
|
||||
|
||||
func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
|
||||
acbw.mu.Lock()
|
||||
defer acbw.mu.Unlock()
|
||||
if len(addrs) <= 0 {
|
||||
acbw.ac.tearDown(errConnDrain)
|
||||
return
|
||||
}
|
||||
if !acbw.ac.tryUpdateAddrs(addrs) {
|
||||
cc := acbw.ac.cc
|
||||
acbw.ac.mu.Lock()
|
||||
// Set old ac.acbw to nil so the Shutdown state update will be ignored
|
||||
// by balancer.
|
||||
//
|
||||
// TODO(bar) the state transition could be wrong when tearDown() old ac
|
||||
// and creating new ac, fix the transition.
|
||||
acbw.ac.acbw = nil
|
||||
acbw.ac.mu.Unlock()
|
||||
acState := acbw.ac.getState()
|
||||
acbw.ac.tearDown(errConnDrain)
|
||||
|
||||
if acState == connectivity.Shutdown {
|
||||
return
|
||||
}
|
||||
|
||||
ac, err := cc.newAddrConn(addrs)
|
||||
if err != nil {
|
||||
grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
|
||||
return
|
||||
}
|
||||
acbw.ac = ac
|
||||
ac.mu.Lock()
|
||||
ac.acbw = acbw
|
||||
ac.mu.Unlock()
|
||||
if acState != connectivity.Idle {
|
||||
ac.connect()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (acbw *acBalancerWrapper) Connect() {
|
||||
acbw.mu.Lock()
|
||||
defer acbw.mu.Unlock()
|
||||
acbw.ac.connect()
|
||||
}
|
||||
|
||||
func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
|
||||
acbw.mu.Lock()
|
||||
defer acbw.mu.Unlock()
|
||||
return acbw.ac
|
||||
}
|
|
@@ -0,0 +1,328 @@
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type balancerWrapperBuilder struct {
|
||||
b Balancer // The v1 balancer.
|
||||
}
|
||||
|
||||
func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
|
||||
targetAddr := cc.Target()
|
||||
targetSplitted := strings.Split(targetAddr, ":///")
|
||||
if len(targetSplitted) >= 2 {
|
||||
targetAddr = targetSplitted[1]
|
||||
}
|
||||
|
||||
bwb.b.Start(targetAddr, BalancerConfig{
|
||||
DialCreds: opts.DialCreds,
|
||||
Dialer: opts.Dialer,
|
||||
})
|
||||
_, pickfirst := bwb.b.(*pickFirst)
|
||||
bw := &balancerWrapper{
|
||||
balancer: bwb.b,
|
||||
pickfirst: pickfirst,
|
||||
cc: cc,
|
||||
targetAddr: targetAddr,
|
||||
startCh: make(chan struct{}),
|
||||
conns: make(map[resolver.Address]balancer.SubConn),
|
||||
connSt: make(map[balancer.SubConn]*scState),
|
||||
csEvltr: &balancer.ConnectivityStateEvaluator{},
|
||||
state: connectivity.Idle,
|
||||
}
|
||||
cc.UpdateBalancerState(connectivity.Idle, bw)
|
||||
go bw.lbWatcher()
|
||||
return bw
|
||||
}
|
||||
|
||||
func (bwb *balancerWrapperBuilder) Name() string {
|
||||
return "wrapper"
|
||||
}
|
||||
|
||||
type scState struct {
|
||||
addr Address // The v1 address type.
|
||||
s connectivity.State
|
||||
down func(error)
|
||||
}
|
||||
|
||||
type balancerWrapper struct {
|
||||
balancer Balancer // The v1 balancer.
|
||||
pickfirst bool
|
||||
|
||||
cc balancer.ClientConn
|
||||
targetAddr string // Target without the scheme.
|
||||
|
||||
mu sync.Mutex
|
||||
conns map[resolver.Address]balancer.SubConn
|
||||
connSt map[balancer.SubConn]*scState
|
||||
// This channel is closed when handling the first resolver result.
|
||||
// lbWatcher blocks until this is closed, to avoid race between
|
||||
// - NewSubConn is created, cc wants to notify balancer of state changes;
|
||||
// - Build hasn't returned, cc doesn't have access to balancer.
|
||||
startCh chan struct{}
|
||||
|
||||
// To aggregate the connectivity state.
|
||||
csEvltr *balancer.ConnectivityStateEvaluator
|
||||
state connectivity.State
|
||||
}
|
||||
|
||||
// lbWatcher watches the Notify channel of the balancer and manages
|
||||
// connections accordingly.
|
||||
func (bw *balancerWrapper) lbWatcher() {
|
||||
<-bw.startCh
|
||||
notifyCh := bw.balancer.Notify()
|
||||
if notifyCh == nil {
|
||||
// There's no resolver in the balancer. Connect directly.
|
||||
a := resolver.Address{
|
||||
Addr: bw.targetAddr,
|
||||
Type: resolver.Backend,
|
||||
}
|
||||
sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
|
||||
if err != nil {
|
||||
grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
|
||||
} else {
|
||||
bw.mu.Lock()
|
||||
bw.conns[a] = sc
|
||||
bw.connSt[sc] = &scState{
|
||||
addr: Address{Addr: bw.targetAddr},
|
||||
s: connectivity.Idle,
|
||||
}
|
||||
bw.mu.Unlock()
|
||||
sc.Connect()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
for addrs := range notifyCh {
|
||||
grpclog.Infof("balancerWrapper: got update addr from Notify: %v\n", addrs)
|
||||
if bw.pickfirst {
|
||||
var (
|
||||
oldA resolver.Address
|
||||
oldSC balancer.SubConn
|
||||
)
|
||||
bw.mu.Lock()
|
||||
for oldA, oldSC = range bw.conns {
|
||||
break
|
||||
}
|
||||
bw.mu.Unlock()
|
||||
if len(addrs) <= 0 {
|
||||
if oldSC != nil {
|
||||
// Teardown old sc.
|
||||
bw.mu.Lock()
|
||||
delete(bw.conns, oldA)
|
||||
delete(bw.connSt, oldSC)
|
||||
bw.mu.Unlock()
|
||||
bw.cc.RemoveSubConn(oldSC)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
var newAddrs []resolver.Address
|
||||
for _, a := range addrs {
|
||||
newAddr := resolver.Address{
|
||||
Addr: a.Addr,
|
||||
Type: resolver.Backend, // All addresses from balancer are all backends.
|
||||
ServerName: "",
|
||||
Metadata: a.Metadata,
|
||||
}
|
||||
newAddrs = append(newAddrs, newAddr)
|
||||
}
|
||||
if oldSC == nil {
|
||||
// Create new sc.
|
||||
sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{})
|
||||
if err != nil {
|
||||
grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err)
|
||||
} else {
|
||||
bw.mu.Lock()
|
||||
// For pickfirst, there should be only one SubConn, so the
|
||||
// address doesn't matter. All states updating (up and down)
|
||||
// and picking should all happen on that only SubConn.
|
||||
bw.conns[resolver.Address{}] = sc
|
||||
bw.connSt[sc] = &scState{
|
||||
addr: addrs[0], // Use the first address.
|
||||
s: connectivity.Idle,
|
||||
}
|
||||
bw.mu.Unlock()
|
||||
sc.Connect()
|
||||
}
|
||||
} else {
|
||||
bw.mu.Lock()
|
||||
bw.connSt[oldSC].addr = addrs[0]
|
||||
bw.mu.Unlock()
|
||||
oldSC.UpdateAddresses(newAddrs)
|
||||
}
|
||||
} else {
|
||||
var (
|
||||
add []resolver.Address // Addresses need to setup connections.
|
||||
del []balancer.SubConn // Connections need to tear down.
|
||||
)
|
||||
resAddrs := make(map[resolver.Address]Address)
|
||||
for _, a := range addrs {
|
||||
resAddrs[resolver.Address{
|
||||
Addr: a.Addr,
|
||||
Type: resolver.Backend, // All addresses from balancer are all backends.
|
||||
ServerName: "",
|
||||
Metadata: a.Metadata,
|
||||
}] = a
|
||||
}
|
||||
bw.mu.Lock()
|
||||
for a := range resAddrs {
|
||||
if _, ok := bw.conns[a]; !ok {
|
||||
add = append(add, a)
|
||||
}
|
||||
}
|
||||
for a, c := range bw.conns {
|
||||
if _, ok := resAddrs[a]; !ok {
|
||||
del = append(del, c)
|
||||
delete(bw.conns, a)
|
||||
// Keep the state of this sc in bw.connSt until its state becomes Shutdown.
|
||||
}
|
||||
}
|
||||
bw.mu.Unlock()
|
||||
for _, a := range add {
|
||||
sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
|
||||
if err != nil {
|
||||
grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
|
||||
} else {
|
||||
bw.mu.Lock()
|
||||
bw.conns[a] = sc
|
||||
bw.connSt[sc] = &scState{
|
||||
addr: resAddrs[a],
|
||||
s: connectivity.Idle,
|
||||
}
|
||||
bw.mu.Unlock()
|
||||
sc.Connect()
|
||||
}
|
||||
}
|
||||
for _, c := range del {
|
||||
bw.cc.RemoveSubConn(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
|
||||
bw.mu.Lock()
|
||||
defer bw.mu.Unlock()
|
||||
scSt, ok := bw.connSt[sc]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if s == connectivity.Idle {
|
||||
sc.Connect()
|
||||
}
|
||||
oldS := scSt.s
|
||||
scSt.s = s
|
||||
if oldS != connectivity.Ready && s == connectivity.Ready {
|
||||
scSt.down = bw.balancer.Up(scSt.addr)
|
||||
} else if oldS == connectivity.Ready && s != connectivity.Ready {
|
||||
if scSt.down != nil {
|
||||
scSt.down(errConnClosing)
|
||||
}
|
||||
}
|
||||
sa := bw.csEvltr.RecordTransition(oldS, s)
|
||||
if bw.state != sa {
|
||||
bw.state = sa
|
||||
}
|
||||
bw.cc.UpdateBalancerState(bw.state, bw)
|
||||
if s == connectivity.Shutdown {
|
||||
// Remove state for this sc.
|
||||
delete(bw.connSt, sc)
|
||||
}
|
||||
}
|
||||
|
||||
func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
|
||||
bw.mu.Lock()
|
||||
defer bw.mu.Unlock()
|
||||
select {
|
||||
case <-bw.startCh:
|
||||
default:
|
||||
close(bw.startCh)
|
||||
}
|
||||
// There should be a resolver inside the balancer.
|
||||
// All updates here, if any, are ignored.
|
||||
}
|
||||
|
||||
func (bw *balancerWrapper) Close() {
|
||||
bw.mu.Lock()
|
||||
defer bw.mu.Unlock()
|
||||
select {
|
||||
case <-bw.startCh:
|
||||
default:
|
||||
close(bw.startCh)
|
||||
}
|
||||
bw.balancer.Close()
|
||||
}
|
||||
|
||||
// The picker is the balancerWrapper itself.
|
||||
// Pick should never return ErrNoSubConnAvailable.
|
||||
// It either blocks or returns error, consistent with v1 balancer Get().
|
||||
func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
|
||||
failfast := true // Default failfast is true.
|
||||
if ss, ok := rpcInfoFromContext(ctx); ok {
|
||||
failfast = ss.failfast
|
||||
}
|
||||
a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
var done func(balancer.DoneInfo)
|
||||
if p != nil {
|
||||
done = func(i balancer.DoneInfo) { p() }
|
||||
}
|
||||
var sc balancer.SubConn
|
||||
bw.mu.Lock()
|
||||
defer bw.mu.Unlock()
|
||||
if bw.pickfirst {
|
||||
// Get the first sc in conns.
|
||||
for _, sc = range bw.conns {
|
||||
break
|
||||
}
|
||||
} else {
|
||||
var ok bool
|
||||
sc, ok = bw.conns[resolver.Address{
|
||||
Addr: a.Addr,
|
||||
Type: resolver.Backend,
|
||||
ServerName: "",
|
||||
Metadata: a.Metadata,
|
||||
}]
|
||||
if !ok && failfast {
|
||||
return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
|
||||
}
|
||||
if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) {
|
||||
// If the returned sc is not ready and RPC is failfast,
|
||||
// return error, and this RPC will fail.
|
||||
return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
|
||||
}
|
||||
}
|
||||
|
||||
return sc, done, nil
|
||||
}
|
|
@@ -19,291 +19,56 @@
package grpc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/stats"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
// recvResponse receives and parses an RPC response.
|
||||
// On error, it returns the error and indicates whether the call should be retried.
|
||||
// Invoke sends the RPC request on the wire and returns after response is
|
||||
// received. This is typically called by generated code.
|
||||
//
|
||||
// TODO(zhaoq): Check whether the received message sequence is valid.
|
||||
// TODO ctx is used for stats collection and processing. It is the context passed from the application.
|
||||
func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) (err error) {
|
||||
// Try to acquire header metadata from the server if there is any.
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if _, ok := err.(transport.ConnectionError); !ok {
|
||||
t.CloseStream(stream, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
c.headerMD, err = stream.Header()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
p := &parser{r: stream}
|
||||
var inPayload *stats.InPayload
|
||||
if dopts.copts.StatsHandler != nil {
|
||||
inPayload = &stats.InPayload{
|
||||
Client: true,
|
||||
}
|
||||
}
|
||||
for {
|
||||
if c.maxReceiveMessageSize == nil {
|
||||
return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
|
||||
}
|
||||
if err = recv(p, dopts.codec, stream, dopts.dc, reply, *c.maxReceiveMessageSize, inPayload); err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
if inPayload != nil && err == io.EOF && stream.Status().Code() == codes.OK {
|
||||
// TODO in the current implementation, inTrailer may be handled before inPayload in some cases.
|
||||
// Fix the order if necessary.
|
||||
dopts.copts.StatsHandler.HandleRPC(ctx, inPayload)
|
||||
}
|
||||
c.trailerMD = stream.Trailer()
|
||||
return nil
|
||||
}
|
||||
// All errors returned by Invoke are compatible with the status package.
|
||||
func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
|
||||
// allow interceptor to see all applicable call options, which means those
|
||||
// configured as defaults from dial option as well as per-call options
|
||||
opts = combine(cc.dopts.callOptions, opts)
|
||||
|
||||
// sendRequest writes out various information of an RPC such as Context and Message.
|
||||
func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, c *callInfo, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
// If err is connection error, t will be closed, no need to close stream here.
|
||||
if _, ok := err.(transport.ConnectionError); !ok {
|
||||
t.CloseStream(stream, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
var (
|
||||
cbuf *bytes.Buffer
|
||||
outPayload *stats.OutPayload
|
||||
)
|
||||
if compressor != nil {
|
||||
cbuf = new(bytes.Buffer)
|
||||
}
|
||||
if dopts.copts.StatsHandler != nil {
|
||||
outPayload = &stats.OutPayload{
|
||||
Client: true,
|
||||
}
|
||||
}
|
||||
outBuf, err := encode(dopts.codec, args, compressor, cbuf, outPayload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c.maxSendMessageSize == nil {
|
||||
return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
|
||||
}
|
||||
if len(outBuf) > *c.maxSendMessageSize {
|
||||
return Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(outBuf), *c.maxSendMessageSize)
|
||||
}
|
||||
err = t.Write(stream, outBuf, opts)
|
||||
if err == nil && outPayload != nil {
|
||||
outPayload.SentTime = time.Now()
|
||||
dopts.copts.StatsHandler.HandleRPC(ctx, outPayload)
|
||||
}
|
||||
// t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method
|
||||
// does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following
|
||||
// recvResponse to get the final status.
|
||||
if err != nil && err != io.EOF {
|
||||
return err
|
||||
}
|
||||
// Sent successfully.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Invoke sends the RPC request on the wire and returns after response is received.
|
||||
// Invoke is called by generated code. Also users can call Invoke directly when it
|
||||
// is really needed in their use cases.
|
||||
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
|
||||
if cc.dopts.unaryInt != nil {
|
||||
return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
|
||||
}
|
||||
return invoke(ctx, method, args, reply, cc, opts...)
|
||||
}
|
||||
|
||||
func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
|
||||
c := defaultCallInfo
|
||||
mc := cc.GetMethodConfig(method)
|
||||
if mc.WaitForReady != nil {
|
||||
c.failFast = !*mc.WaitForReady
|
||||
func combine(o1 []CallOption, o2 []CallOption) []CallOption {
|
||||
// we don't use append because o1 could have extra capacity whose
|
||||
// elements would be overwritten, which could cause inadvertent
|
||||
// sharing (and race connditions) between concurrent calls
|
||||
if len(o1) == 0 {
|
||||
return o2
|
||||
} else if len(o2) == 0 {
|
||||
return o1
|
||||
}
|
||||
ret := make([]CallOption, len(o1)+len(o2))
|
||||
copy(ret, o1)
|
||||
copy(ret[len(o1):], o2)
|
||||
return ret
|
||||
}
|
||||
|
||||
if mc.Timeout != nil && *mc.Timeout >= 0 {
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
|
||||
defer cancel()
|
||||
// Invoke sends the RPC request on the wire and returns after response is
|
||||
// received. This is typically called by generated code.
|
||||
//
|
||||
// DEPRECATED: Use ClientConn.Invoke instead.
|
||||
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
|
||||
return cc.Invoke(ctx, method, args, reply, opts...)
|
||||
}
|
||||
|
||||
opts = append(cc.dopts.callOptions, opts...)
|
||||
for _, o := range opts {
|
||||
if err := o.before(&c); err != nil {
|
||||
return toRPCErr(err)
|
||||
}
|
||||
}
|
||||
defer func() {
|
||||
for _, o := range opts {
|
||||
o.after(&c)
|
||||
}
|
||||
}()
|
||||
var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}
|
||||
|
||||
c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
|
||||
c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
|
||||
|
||||
if EnableTracing {
|
||||
c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
|
||||
defer c.traceInfo.tr.Finish()
|
||||
c.traceInfo.firstLine.client = true
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
|
||||
}
|
||||
c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
|
||||
// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
|
||||
defer func() {
|
||||
if e != nil {
|
||||
c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{e}}, true)
|
||||
c.traceInfo.tr.SetError()
|
||||
}
|
||||
}()
|
||||
}
|
||||
ctx = newContextWithRPCInfo(ctx)
|
||||
sh := cc.dopts.copts.StatsHandler
|
||||
if sh != nil {
|
||||
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
|
||||
begin := &stats.Begin{
|
||||
Client: true,
|
||||
BeginTime: time.Now(),
|
||||
FailFast: c.failFast,
|
||||
}
|
||||
sh.HandleRPC(ctx, begin)
|
||||
defer func() {
|
||||
end := &stats.End{
|
||||
Client: true,
|
||||
EndTime: time.Now(),
|
||||
Error: e,
|
||||
}
|
||||
sh.HandleRPC(ctx, end)
|
||||
}()
|
||||
}
|
||||
topts := &transport.Options{
|
||||
Last: true,
|
||||
Delay: false,
|
||||
}
|
||||
for {
|
||||
var (
|
||||
err error
|
||||
t transport.ClientTransport
|
||||
stream *transport.Stream
|
||||
// Record the put handler from Balancer.Get(...). It is called once the
|
||||
// RPC has completed or failed.
|
||||
put func()
|
||||
)
|
||||
// TODO(zhaoq): Need a formal spec of fail-fast.
|
||||
callHdr := &transport.CallHdr{
|
||||
Host: cc.authority,
|
||||
Method: method,
|
||||
}
|
||||
if cc.dopts.cp != nil {
|
||||
callHdr.SendCompress = cc.dopts.cp.Type()
|
||||
}
|
||||
if c.creds != nil {
|
||||
callHdr.Creds = c.creds
|
||||
}
|
||||
|
||||
gopts := BalancerGetOptions{
|
||||
BlockingWait: !c.failFast,
|
||||
}
|
||||
t, put, err = cc.getTransport(ctx, gopts)
|
||||
func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
|
||||
cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
|
||||
if err != nil {
|
||||
// TODO(zhaoq): Probably revisit the error handling.
|
||||
if _, ok := status.FromError(err); ok {
|
||||
return err
|
||||
}
|
||||
if err == errConnClosing || err == errConnUnavailable {
|
||||
if c.failFast {
|
||||
return Errorf(codes.Unavailable, "%v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
// All the other errors are treated as Internal errors.
|
||||
return Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
if c.traceInfo.tr != nil {
|
||||
c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
|
||||
}
|
||||
stream, err = t.NewStream(ctx, callHdr)
|
||||
if err != nil {
|
||||
if put != nil {
|
||||
if _, ok := err.(transport.ConnectionError); ok {
|
||||
// If error is connection error, transport was sending data on wire,
|
||||
// and we are not sure if anything has been sent on wire.
|
||||
// If error is not connection error, we are sure nothing has been sent.
|
||||
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
|
||||
}
|
||||
put()
|
||||
}
|
||||
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
|
||||
continue
|
||||
}
|
||||
return toRPCErr(err)
|
||||
}
|
||||
if peer, ok := peer.FromContext(stream.Context()); ok {
|
||||
c.peer = peer
|
||||
}
|
||||
err = sendRequest(ctx, cc.dopts, cc.dopts.cp, &c, callHdr, stream, t, args, topts)
|
||||
if err != nil {
|
||||
if put != nil {
|
||||
updateRPCInfoInContext(ctx, rpcInfo{
|
||||
bytesSent: stream.BytesSent(),
|
||||
bytesReceived: stream.BytesReceived(),
|
||||
})
|
||||
put()
|
||||
}
|
||||
// Retry a non-failfast RPC when
|
||||
// i) there is a connection error; or
|
||||
// ii) the server started to drain before this RPC was initiated.
|
||||
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
|
||||
continue
|
||||
}
|
||||
return toRPCErr(err)
|
||||
}
|
||||
err = recvResponse(ctx, cc.dopts, t, &c, stream, reply)
|
||||
if err != nil {
|
||||
if put != nil {
|
||||
updateRPCInfoInContext(ctx, rpcInfo{
|
||||
bytesSent: stream.BytesSent(),
|
||||
bytesReceived: stream.BytesReceived(),
|
||||
})
|
||||
put()
|
||||
}
|
||||
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
|
||||
continue
|
||||
}
|
||||
return toRPCErr(err)
|
||||
}
|
||||
if c.traceInfo.tr != nil {
|
||||
c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
|
||||
}
|
||||
t.CloseStream(stream, nil)
|
||||
if put != nil {
|
||||
updateRPCInfoInContext(ctx, rpcInfo{
|
||||
bytesSent: stream.BytesSent(),
|
||||
bytesReceived: stream.BytesReceived(),
|
||||
})
|
||||
put()
|
||||
}
|
||||
return stream.Status().Err()
|
||||
if err := cs.SendMsg(req); err != nil {
|
||||
return err
|
||||
}
|
||||
return cs.RecvMsg(reply)
|
||||
}
|
||||
|
|
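For orientation, here is a minimal sketch of how the unary path above is driven from application code. The package name, helper name, and "/example.Service/Method" are illustrative assumptions, not part of this change; generated code normally passes concrete proto message types rather than interface{} values.

package example

import (
    "golang.org/x/net/context"

    "google.golang.org/grpc"
)

// callUnary is a hypothetical helper. ClientConn.Invoke runs any configured
// unary interceptor, then drives newClientStream/SendMsg/RecvMsg exactly as
// invoke() above does.
func callUnary(ctx context.Context, cc *grpc.ClientConn, req, reply interface{}) error {
    return cc.Invoke(ctx, "/example.Service/Method", req, reply)
}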
File diff suppressed because it is too large
@@ -19,86 +19,32 @@
package grpc

import (
    "math"
    "sync"

    "github.com/golang/protobuf/proto"
    "google.golang.org/grpc/encoding"
    _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
)

// baseCodec contains the functionality of both Codec and encoding.Codec, but
// omits the name/string, which vary between the two and are not needed for
// anything besides the registry in the encoding package.
type baseCodec interface {
    Marshal(v interface{}) ([]byte, error)
    Unmarshal(data []byte, v interface{}) error
}

var _ baseCodec = Codec(nil)
var _ baseCodec = encoding.Codec(nil)

// Codec defines the interface gRPC uses to encode and decode messages.
// Note that implementations of this interface must be thread safe;
// a Codec's methods can be called from concurrent goroutines.
//
// Deprecated: use encoding.Codec instead.
type Codec interface {
    // Marshal returns the wire format of v.
    Marshal(v interface{}) ([]byte, error)
    // Unmarshal parses the wire format into v.
    Unmarshal(data []byte, v interface{}) error
    // String returns the name of the Codec implementation. The returned
    // string will be used as part of content type in transmission.
    // String returns the name of the Codec implementation. This is unused by
    // gRPC.
    String() string
}

// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
type protoCodec struct {
}

type cachedProtoBuffer struct {
    lastMarshaledSize uint32
    proto.Buffer
}

func capToMaxInt32(val int) uint32 {
    if val > math.MaxInt32 {
        return uint32(math.MaxInt32)
    }
    return uint32(val)
}

func (p protoCodec) marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
    protoMsg := v.(proto.Message)
    newSlice := make([]byte, 0, cb.lastMarshaledSize)

    cb.SetBuf(newSlice)
    cb.Reset()
    if err := cb.Marshal(protoMsg); err != nil {
        return nil, err
    }
    out := cb.Bytes()
    cb.lastMarshaledSize = capToMaxInt32(len(out))
    return out, nil
}

func (p protoCodec) Marshal(v interface{}) ([]byte, error) {
    cb := protoBufferPool.Get().(*cachedProtoBuffer)
    out, err := p.marshal(v, cb)

    // put back buffer and lose the ref to the slice
    cb.SetBuf(nil)
    protoBufferPool.Put(cb)
    return out, err
}

func (p protoCodec) Unmarshal(data []byte, v interface{}) error {
    cb := protoBufferPool.Get().(*cachedProtoBuffer)
    cb.SetBuf(data)
    v.(proto.Message).Reset()
    err := cb.Unmarshal(v.(proto.Message))
    cb.SetBuf(nil)
    protoBufferPool.Put(cb)
    return err
}

func (protoCodec) String() string {
    return "proto"
}

var (
    protoBufferPool = &sync.Pool{
        New: func() interface{} {
            return &cachedProtoBuffer{
                Buffer:            proto.Buffer{},
                lastMarshaledSize: 16,
            }
        },
    }
)
@@ -1,16 +1,62 @@
// generated by stringer -type=Code; DO NOT EDIT
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package codes

import "fmt"
import "strconv"

const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated"

var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192}

func (i Code) String() string {
    if i+1 >= Code(len(_Code_index)) {
        return fmt.Sprintf("Code(%d)", i)
func (c Code) String() string {
    switch c {
    case OK:
        return "OK"
    case Canceled:
        return "Canceled"
    case Unknown:
        return "Unknown"
    case InvalidArgument:
        return "InvalidArgument"
    case DeadlineExceeded:
        return "DeadlineExceeded"
    case NotFound:
        return "NotFound"
    case AlreadyExists:
        return "AlreadyExists"
    case PermissionDenied:
        return "PermissionDenied"
    case ResourceExhausted:
        return "ResourceExhausted"
    case FailedPrecondition:
        return "FailedPrecondition"
    case Aborted:
        return "Aborted"
    case OutOfRange:
        return "OutOfRange"
    case Unimplemented:
        return "Unimplemented"
    case Internal:
        return "Internal"
    case Unavailable:
        return "Unavailable"
    case DataLoss:
        return "DataLoss"
    case Unauthenticated:
        return "Unauthenticated"
    default:
        return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
    }
    return _Code_name[_Code_index[i]:_Code_index[i+1]]
}
@@ -20,11 +20,14 @@
// consistent across various languages.
package codes // import "google.golang.org/grpc/codes"

import (
    "fmt"
    "strconv"
)

// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
type Code uint32

//go:generate stringer -type=Code

const (
    // OK is returned on success.
    OK Code = 0
@@ -68,10 +71,6 @@ const (
    // instead for those errors).
    PermissionDenied Code = 7

    // Unauthenticated indicates the request does not have valid
    // authentication credentials for the operation.
    Unauthenticated Code = 16

    // ResourceExhausted indicates some resource has been exhausted, perhaps
    // a per-user quota, or perhaps the entire file system is out of space.
    ResourceExhausted Code = 8
@@ -141,4 +140,58 @@ const (

    // DataLoss indicates unrecoverable data loss or corruption.
    DataLoss Code = 15

    // Unauthenticated indicates the request does not have valid
    // authentication credentials for the operation.
    Unauthenticated Code = 16

    _maxCode = 17
)

var strToCode = map[string]Code{
    `"OK"`: OK,
    `"CANCELLED"`:/* [sic] */ Canceled,
    `"UNKNOWN"`:             Unknown,
    `"INVALID_ARGUMENT"`:    InvalidArgument,
    `"DEADLINE_EXCEEDED"`:   DeadlineExceeded,
    `"NOT_FOUND"`:           NotFound,
    `"ALREADY_EXISTS"`:      AlreadyExists,
    `"PERMISSION_DENIED"`:   PermissionDenied,
    `"RESOURCE_EXHAUSTED"`:  ResourceExhausted,
    `"FAILED_PRECONDITION"`: FailedPrecondition,
    `"ABORTED"`:             Aborted,
    `"OUT_OF_RANGE"`:        OutOfRange,
    `"UNIMPLEMENTED"`:       Unimplemented,
    `"INTERNAL"`:            Internal,
    `"UNAVAILABLE"`:         Unavailable,
    `"DATA_LOSS"`:           DataLoss,
    `"UNAUTHENTICATED"`:     Unauthenticated,
}

// UnmarshalJSON unmarshals b into the Code.
func (c *Code) UnmarshalJSON(b []byte) error {
    // From json.Unmarshaler: By convention, to approximate the behavior of
    // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
    // a no-op.
    if string(b) == "null" {
        return nil
    }
    if c == nil {
        return fmt.Errorf("nil receiver passed to UnmarshalJSON")
    }

    if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
        if ci >= _maxCode {
            return fmt.Errorf("invalid code: %q", ci)
        }

        *c = Code(ci)
        return nil
    }

    if jc, ok := strToCode[string(b)]; ok {
        *c = jc
        return nil
    }
    return fmt.Errorf("invalid code: %q", string(b))
}
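To see what UnmarshalJSON accepts, a small sketch of decoding a code from the two JSON forms used by service configs. The standalone program below is illustrative only; it is not part of this change.

package main

import (
    "encoding/json"
    "fmt"

    "google.golang.org/grpc/codes"
)

func main() {
    var c codes.Code
    // The quoted name form used in service configs decodes through strToCode.
    if err := json.Unmarshal([]byte(`"UNAVAILABLE"`), &c); err == nil {
        fmt.Println(c) // Unavailable
    }
    // The numeric form decodes through strconv.ParseUint and the _maxCode check.
    if err := json.Unmarshal([]byte(`14`), &c); err == nil {
        fmt.Println(uint32(c)) // 14
    }
}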
@@ -1,48 +0,0 @@
#!/usr/bin/env bash

set -e

workdir=.cover
profile="$workdir/cover.out"
mode=set
end2endtest="google.golang.org/grpc/test"

generate_cover_data() {
    rm -rf "$workdir"
    mkdir "$workdir"

    for pkg in "$@"; do
        if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ]
        then
            f="$workdir/$(echo $pkg | tr / -)"
            go test -covermode="$mode" -coverprofile="$f.cover" "$pkg"
            go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest"
        fi
    done

    echo "mode: $mode" >"$profile"
    grep -h -v "^mode:" "$workdir"/*.cover >>"$profile"
}

show_cover_report() {
    go tool cover -${1}="$profile"
}

push_to_coveralls() {
    goveralls -coverprofile="$profile"
}

generate_cover_data $(go list ./...)
show_cover_report func
case "$1" in
"")
    ;;
--html)
    show_cover_report html ;;
--coveralls)
    push_to_coveralls ;;
*)
    echo >&2 "error: invalid option: $1" ;;
esac
rm -rf "$workdir"
@@ -31,13 +31,12 @@ import (
    "net"
    "strings"

    "github.com/golang/protobuf/proto"
    "golang.org/x/net/context"
)

var (
    // alpnProtoStr are the specified application level protocols for gRPC.
    alpnProtoStr = []string{"h2"}
)
var alpnProtoStr = []string{"h2"}

// PerRPCCredentials defines the common interface for the credentials which need to
// attach security information to every RPC (e.g., oauth2).

@@ -45,8 +44,9 @@ type PerRPCCredentials interface {
    // GetRequestMetadata gets the current request metadata, refreshing
    // tokens if required. This should be called by the transport layer on
    // each request, and the data should be populated in headers or other
    // context. uri is the URI of the entry point for the request. When
    // supported by the underlying implementation, ctx can be used for
    // context. If a status code is returned, it will be used as the status
    // for the RPC. uri is the URI of the entry point for the request.
    // When supported by the underlying implementation, ctx can be used for
    // timeout and cancellation.
    // TODO(zhaoq): Define the set of the qualified keys instead of leaving
    // it as an arbitrary string.

@@ -74,11 +74,9 @@ type AuthInfo interface {
    AuthType() string
}

var (
    // ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
    // and the caller should not close rawConn.
    ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
)
var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")

// TransportCredentials defines the common interface for all the live gRPC wire
// protocols and supported transport security protocols (e.g., TLS, SSL).

@@ -91,10 +89,14 @@ type TransportCredentials interface {
    // (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
    // If the returned error is a wrapper error, implementations should make sure that
    // the error implements Temporary() to have the correct retry behaviors.
    //
    // If the returned net.Conn is closed, it MUST close the net.Conn provided.
    ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
    // ServerHandshake does the authentication handshake for servers. It returns
    // the authenticated connection and the corresponding auth information about
    // the connection.
    //
    // If the returned net.Conn is closed, it MUST close the net.Conn provided.
    ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
    // Info provides the ProtocolInfo of this TransportCredentials.
    Info() ProtocolInfo

@@ -117,6 +119,18 @@ func (t TLSInfo) AuthType() string {
    return "tls"
}

// GetChannelzSecurityValue returns security info requested by channelz.
func (t TLSInfo) GetChannelzSecurityValue() ChannelzSecurityValue {
    v := &TLSChannelzSecurityValue{
        StandardName: cipherSuiteLookup[t.State.CipherSuite],
    }
    // Currently there's no way to get LocalCertificate info from tls package.
    if len(t.State.PeerCertificates) > 0 {
        v.RemoteCertificate = t.State.PeerCertificates[0].Raw
    }
    return v
}

// tlsCreds is the credentials required for authenticating a connection using TLS.
type tlsCreds struct {
    // TLS configuration

@@ -131,15 +145,15 @@ func (c tlsCreds) Info() ProtocolInfo {
    }
}

func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
    // use local cfg to avoid clobbering ServerName if using multiple endpoints
    cfg := cloneTLSConfig(c.config)
    if cfg.ServerName == "" {
        colonPos := strings.LastIndex(addr, ":")
        colonPos := strings.LastIndex(authority, ":")
        if colonPos == -1 {
            colonPos = len(addr)
            colonPos = len(authority)
        }
        cfg.ServerName = addr[:colonPos]
        cfg.ServerName = authority[:colonPos]
    }
    conn := tls.Client(rawConn, cfg)
    errChannel := make(chan error, 1)

@@ -154,7 +168,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net
    case <-ctx.Done():
        return nil, nil, ctx.Err()
    }
    return conn, TLSInfo{conn.ConnectionState()}, nil
    return tlsConn{Conn: conn, rawConn: rawConn}, TLSInfo{conn.ConnectionState()}, nil
}

func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {

@@ -162,7 +176,7 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
    if err := conn.Handshake(); err != nil {
        return nil, nil, err
    }
    return conn, TLSInfo{conn.ConnectionState()}, nil
    return tlsConn{Conn: conn, rawConn: rawConn}, TLSInfo{conn.ConnectionState()}, nil
}

func (c *tlsCreds) Clone() TransportCredentials {

@@ -217,3 +231,63 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error
    }
    return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
}

// ChannelzSecurityInfo defines the interface that security protocols should implement
// in order to provide security info to channelz.
type ChannelzSecurityInfo interface {
    GetSecurityValue() ChannelzSecurityValue
}

// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
// and *OtherChannelzSecurityValue.
type ChannelzSecurityValue interface {
    isChannelzSecurityValue()
}

// TLSChannelzSecurityValue defines the struct that TLS protocol should return
// from GetSecurityValue(), containing security info like cipher and certificate used.
type TLSChannelzSecurityValue struct {
    StandardName      string
    LocalCertificate  []byte
    RemoteCertificate []byte
}

func (*TLSChannelzSecurityValue) isChannelzSecurityValue() {}

// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
// from GetSecurityValue(), which contains protocol specific security info. Note
// the Value field will be sent to users of channelz requesting channel info, and
// thus sensitive info should better be avoided.
type OtherChannelzSecurityValue struct {
    Name  string
    Value proto.Message
}

func (*OtherChannelzSecurityValue) isChannelzSecurityValue() {}

type tlsConn struct {
    *tls.Conn
    rawConn net.Conn
}

var cipherSuiteLookup = map[uint16]string{
    tls.TLS_RSA_WITH_RC4_128_SHA:                "TLS_RSA_WITH_RC4_128_SHA",
    tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA:           "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
    tls.TLS_RSA_WITH_AES_128_CBC_SHA:            "TLS_RSA_WITH_AES_128_CBC_SHA",
    tls.TLS_RSA_WITH_AES_256_CBC_SHA:            "TLS_RSA_WITH_AES_256_CBC_SHA",
    tls.TLS_RSA_WITH_AES_128_GCM_SHA256:         "TLS_RSA_WITH_AES_128_GCM_SHA256",
    tls.TLS_RSA_WITH_AES_256_GCM_SHA384:         "TLS_RSA_WITH_AES_256_GCM_SHA384",
    tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA:        "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
    tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA:    "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
    tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:    "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
    tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA:          "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
    tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA:     "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
    tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:      "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
    tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:      "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
    tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
    tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
    tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:   "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
    tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
    tls.TLS_FALLBACK_SCSV:                       "TLS_FALLBACK_SCSV",
}
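For reference, a minimal sketch of wiring these TransportCredentials into a client dial, built on credentials.NewTLS shown above. The package name, helper, target address, and ServerName are illustrative assumptions, not part of this change.

package example

import (
    "crypto/tls"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials"
)

// dialTLS is a hypothetical helper: it wraps a *tls.Config in
// TransportCredentials and hands it to grpc.Dial, which triggers
// ClientHandshake on the raw connection.
func dialTLS() (*grpc.ClientConn, error) {
    creds := credentials.NewTLS(&tls.Config{ServerName: "example.com"})
    return grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
}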
@@ -1,5 +1,4 @@
// +build go1.7
// +build !go1.8
// +build go1.7,!go1.8

/*
 *

@@ -24,6 +24,14 @@ import (
    "crypto/tls"
)

func init() {
    cipherSuiteLookup[tls.TLS_RSA_WITH_AES_128_CBC_SHA256] = "TLS_RSA_WITH_AES_128_CBC_SHA256"
    cipherSuiteLookup[tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"
    cipherSuiteLookup[tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"
    cipherSuiteLookup[tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305] = "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
    cipherSuiteLookup[tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305] = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305"
}

// cloneTLSConfig returns a shallow clone of the exported
// fields of cfg, ignoring the unexported sync.Once, which
// contains a mutex and must not be copied.
@@ -0,0 +1,35 @@
// +build go1.9,!appengine

/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package credentials

import (
    "errors"
    "syscall"
)

// implements the syscall.Conn interface
func (c tlsConn) SyscallConn() (syscall.RawConn, error) {
    conn, ok := c.rawConn.(syscall.Conn)
    if !ok {
        return nil, errors.New("RawConn does not implement syscall.Conn")
    }
    return conn.SyscallConn()
}
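A hedged sketch of what this enables: a caller holding a handshaked net.Conn that satisfies syscall.Conn (as tlsConn does on go1.9+ outside App Engine) can reach the raw file descriptor, for example to read socket options. The package and helper names are assumptions for illustration only.

package example

import (
    "net"
    "syscall"
)

// rawFD is a hypothetical helper showing the syscall.Conn path.
func rawFD(conn net.Conn) {
    if sc, ok := conn.(syscall.Conn); ok {
        if raw, err := sc.SyscallConn(); err == nil {
            _ = raw.Control(func(fd uintptr) {
                // inspect fd here, e.g. getsockopt via golang.org/x/sys/unix
            })
        }
    }
}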
@@ -0,0 +1,450 @@
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpc

import (
    "fmt"
    "net"
    "time"

    "golang.org/x/net/context"
    "google.golang.org/grpc/balancer"
    "google.golang.org/grpc/credentials"
    "google.golang.org/grpc/internal"
    "google.golang.org/grpc/internal/backoff"
    "google.golang.org/grpc/internal/envconfig"
    "google.golang.org/grpc/internal/transport"
    "google.golang.org/grpc/keepalive"
    "google.golang.org/grpc/resolver"
    "google.golang.org/grpc/stats"
)

// dialOptions configure a Dial call. dialOptions are set by the DialOption
// values passed to Dial.
type dialOptions struct {
    unaryInt    UnaryClientInterceptor
    streamInt   StreamClientInterceptor
    cp          Compressor
    dc          Decompressor
    bs          backoff.Strategy
    block       bool
    insecure    bool
    timeout     time.Duration
    scChan      <-chan ServiceConfig
    authority   string
    copts       transport.ConnectOptions
    callOptions []CallOption
    // This is used by v1 balancer dial option WithBalancer to support v1
    // balancer, and also by WithBalancerName dial option.
    balancerBuilder balancer.Builder
    // This is to support grpclb.
    resolverBuilder      resolver.Builder
    waitForHandshake     bool
    channelzParentID     int64
    disableServiceConfig bool
    disableRetry         bool
}

// DialOption configures how we set up the connection.
type DialOption interface {
    apply(*dialOptions)
}

// EmptyDialOption does not alter the dial configuration. It can be embedded in
// another structure to build custom dial options.
//
// This API is EXPERIMENTAL.
type EmptyDialOption struct{}

func (EmptyDialOption) apply(*dialOptions) {}

// funcDialOption wraps a function that modifies dialOptions into an
// implementation of the DialOption interface.
type funcDialOption struct {
    f func(*dialOptions)
}

func (fdo *funcDialOption) apply(do *dialOptions) {
    fdo.f(do)
}

func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
    return &funcDialOption{
        f: f,
    }
}

// WithWaitForHandshake blocks until the initial settings frame is received from
// the server before assigning RPCs to the connection. Experimental API.
func WithWaitForHandshake() DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.waitForHandshake = true
    })
}

// WithWriteBufferSize determines how much data can be batched before doing a
// write on the wire. The corresponding memory allocation for this buffer will
// be twice the size to keep syscalls low. The default value for this buffer is
// 32KB.
//
// Zero will disable the write buffer such that each write will be on underlying
// connection. Note: A Send call may not directly translate to a write.
func WithWriteBufferSize(s int) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.copts.WriteBufferSize = s
    })
}

// WithReadBufferSize lets you set the size of read buffer, this determines how
// much data can be read at most for each read syscall.
//
// The default value for this buffer is 32KB. Zero will disable read buffer for
// a connection so data framer can access the underlying conn directly.
func WithReadBufferSize(s int) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.copts.ReadBufferSize = s
    })
}

// WithInitialWindowSize returns a DialOption which sets the value for initial
// window size on a stream. The lower bound for window size is 64K and any value
// smaller than that will be ignored.
func WithInitialWindowSize(s int32) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.copts.InitialWindowSize = s
    })
}

// WithInitialConnWindowSize returns a DialOption which sets the value for
// initial window size on a connection. The lower bound for window size is 64K
// and any value smaller than that will be ignored.
func WithInitialConnWindowSize(s int32) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.copts.InitialConnWindowSize = s
    })
}

// WithMaxMsgSize returns a DialOption which sets the maximum message size the
// client can receive.
//
// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
func WithMaxMsgSize(s int) DialOption {
    return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
}

// WithDefaultCallOptions returns a DialOption which sets the default
// CallOptions for calls over the connection.
func WithDefaultCallOptions(cos ...CallOption) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.callOptions = append(o.callOptions, cos...)
    })
}

// WithCodec returns a DialOption which sets a codec for message marshaling and
// unmarshaling.
//
// Deprecated: use WithDefaultCallOptions(CallCustomCodec(c)) instead.
func WithCodec(c Codec) DialOption {
    return WithDefaultCallOptions(CallCustomCodec(c))
}

// WithCompressor returns a DialOption which sets a Compressor to use for
// message compression. It has lower priority than the compressor set by the
// UseCompressor CallOption.
//
// Deprecated: use UseCompressor instead.
func WithCompressor(cp Compressor) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.cp = cp
    })
}

// WithDecompressor returns a DialOption which sets a Decompressor to use for
// incoming message decompression. If incoming response messages are encoded
// using the decompressor's Type(), it will be used. Otherwise, the message
// encoding will be used to look up the compressor registered via
// encoding.RegisterCompressor, which will then be used to decompress the
// message. If no compressor is registered for the encoding, an Unimplemented
// status error will be returned.
//
// Deprecated: use encoding.RegisterCompressor instead.
func WithDecompressor(dc Decompressor) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.dc = dc
    })
}

// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
// Name resolver will be ignored if this DialOption is specified.
//
// Deprecated: use the new balancer APIs in balancer package and
// WithBalancerName.
func WithBalancer(b Balancer) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.balancerBuilder = &balancerWrapperBuilder{
            b: b,
        }
    })
}

// WithBalancerName sets the balancer that the ClientConn will be initialized
// with. Balancer registered with balancerName will be used. This function
// panics if no balancer was registered by balancerName.
//
// The balancer cannot be overridden by balancer option specified by service
// config.
//
// This is an EXPERIMENTAL API.
func WithBalancerName(balancerName string) DialOption {
    builder := balancer.Get(balancerName)
    if builder == nil {
        panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
    }
    return newFuncDialOption(func(o *dialOptions) {
        o.balancerBuilder = builder
    })
}

// withResolverBuilder is only for grpclb.
func withResolverBuilder(b resolver.Builder) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.resolverBuilder = b
    })
}

// WithServiceConfig returns a DialOption which has a channel to read the
// service configuration.
//
// Deprecated: service config should be received through name resolver, as
// specified here.
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
func WithServiceConfig(c <-chan ServiceConfig) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.scChan = c
    })
}

// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
// when backing off after failed connection attempts.
func WithBackoffMaxDelay(md time.Duration) DialOption {
    return WithBackoffConfig(BackoffConfig{MaxDelay: md})
}

// WithBackoffConfig configures the dialer to use the provided backoff
// parameters after connection failures.
//
// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
// for use.
func WithBackoffConfig(b BackoffConfig) DialOption {
    return withBackoff(backoff.Exponential{
        MaxDelay: b.MaxDelay,
    })
}

// withBackoff sets the backoff strategy used for connectRetryNum after a failed
// connection attempt.
//
// This can be exported if arbitrary backoff strategies are allowed by gRPC.
func withBackoff(bs backoff.Strategy) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.bs = bs
    })
}

// WithBlock returns a DialOption which makes caller of Dial blocks until the
// underlying connection is up. Without this, Dial returns immediately and
// connecting the server happens in background.
func WithBlock() DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.block = true
    })
}

// WithInsecure returns a DialOption which disables transport security for this
// ClientConn. Note that transport security is required unless WithInsecure is
// set.
func WithInsecure() DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.insecure = true
    })
}

// WithTransportCredentials returns a DialOption which configures a connection
// level security credentials (e.g., TLS/SSL).
func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.copts.TransportCredentials = creds
    })
}

// WithPerRPCCredentials returns a DialOption which sets credentials and places
// auth state on each outbound RPC.
func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
    })
}

// WithTimeout returns a DialOption that configures a timeout for dialing a
// ClientConn initially. This is valid if and only if WithBlock() is present.
//
// Deprecated: use DialContext and context.WithTimeout instead.
func WithTimeout(d time.Duration) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.timeout = d
    })
}

func withContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.copts.Dialer = f
    })
}

func init() {
    internal.WithContextDialer = withContextDialer
    internal.WithResolverBuilder = withResolverBuilder
}

// WithDialer returns a DialOption that specifies a function to use for dialing
// network addresses. If FailOnNonTempDialError() is set to true, and an error
// is returned by f, gRPC checks the error's Temporary() method to decide if it
// should try to reconnect to the network address.
func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
    return withContextDialer(
        func(ctx context.Context, addr string) (net.Conn, error) {
            if deadline, ok := ctx.Deadline(); ok {
                return f(addr, deadline.Sub(time.Now()))
            }
            return f(addr, 0)
        })
}

// WithStatsHandler returns a DialOption that specifies the stats handler for
// all the RPCs and underlying network connections in this ClientConn.
func WithStatsHandler(h stats.Handler) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.copts.StatsHandler = h
    })
}

// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on
// non-temporary dial errors. If f is true, and dialer returns a non-temporary
// error, gRPC will fail the connection to the network address and won't try to
// reconnect. The default value of FailOnNonTempDialError is false.
//
// This is an EXPERIMENTAL API.
func FailOnNonTempDialError(f bool) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.copts.FailOnNonTempDialError = f
    })
}

// WithUserAgent returns a DialOption that specifies a user agent string for all
// the RPCs.
func WithUserAgent(s string) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.copts.UserAgent = s
    })
}

// WithKeepaliveParams returns a DialOption that specifies keepalive parameters
// for the client transport.
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.copts.KeepaliveParams = kp
    })
}

// WithUnaryInterceptor returns a DialOption that specifies the interceptor for
// unary RPCs.
func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.unaryInt = f
    })
}

// WithStreamInterceptor returns a DialOption that specifies the interceptor for
// streaming RPCs.
func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.streamInt = f
    })
}

// WithAuthority returns a DialOption that specifies the value to be used as the
// :authority pseudo-header. This value only works with WithInsecure and has no
// effect if TransportCredentials are present.
func WithAuthority(a string) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.authority = a
    })
}

// WithChannelzParentID returns a DialOption that specifies the channelz ID of
// current ClientConn's parent. This function is used in nested channel creation
// (e.g. grpclb dial).
func WithChannelzParentID(id int64) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.channelzParentID = id
    })
}

// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any
// service config provided by the resolver and provides a hint to the resolver
// to not fetch service configs.
func WithDisableServiceConfig() DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.disableServiceConfig = true
    })
}

// WithDisableRetry returns a DialOption that disables retries, even if the
// service config enables them. This does not impact transparent retries, which
// will happen automatically if no data is written to the wire or if the RPC is
// unprocessed by the remote server.
//
// Retry support is currently disabled by default, but will be enabled by
// default in the future. Until then, it may be enabled by setting the
// environment variable "GRPC_GO_RETRY" to "on".
//
// This API is EXPERIMENTAL.
func WithDisableRetry() DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.disableRetry = true
    })
}

// WithMaxHeaderListSize returns a DialOption that specifies the maximum
// (uncompressed) size of header list that the client is prepared to accept.
func WithMaxHeaderListSize(s uint32) DialOption {
    return newFuncDialOption(func(o *dialOptions) {
        o.copts.MaxHeaderListSize = &s
    })
}

func defaultDialOptions() dialOptions {
    return dialOptions{
        disableRetry: !envconfig.Retry,
        copts: transport.ConnectOptions{
            WriteBufferSize: defaultWriteBufSize,
            ReadBufferSize:  defaultReadBufSize,
        },
    }
}
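A hedged sketch of combining several of the options defined above in a single dial; the package name, helper, target address, and limits are illustrative assumptions, not part of this change.

package example

import (
    "log"

    "google.golang.org/grpc"
)

// dial is a hypothetical helper: WithBlock makes Dial wait for the connection,
// WithInsecure disables transport security, and WithDefaultCallOptions sets a
// per-connection default receive limit.
func dial() *grpc.ClientConn {
    conn, err := grpc.Dial(
        "localhost:50051",
        grpc.WithInsecure(),
        grpc.WithBlock(),
        grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(16*1024*1024)),
        grpc.WithUserAgent("example-client/1.0"),
    )
    if err != nil {
        log.Fatalf("dial failed: %v", err)
    }
    return conn
}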
@@ -0,0 +1,118 @@
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package encoding defines the interface for the compressor and codec, and
// functions to register and retrieve compressors and codecs.
//
// This package is EXPERIMENTAL.
package encoding

import (
    "io"
    "strings"
)

// Identity specifies the optional encoding for uncompressed streams.
// It is intended for grpc internal use only.
const Identity = "identity"

// Compressor is used for compressing and decompressing when sending or
// receiving messages.
type Compressor interface {
    // Compress writes the data written to wc to w after compressing it. If an
    // error occurs while initializing the compressor, that error is returned
    // instead.
    Compress(w io.Writer) (io.WriteCloser, error)
    // Decompress reads data from r, decompresses it, and provides the
    // uncompressed data via the returned io.Reader. If an error occurs while
    // initializing the decompressor, that error is returned instead.
    Decompress(r io.Reader) (io.Reader, error)
    // Name is the name of the compression codec and is used to set the content
    // coding header. The result must be static; the result cannot change
    // between calls.
    Name() string
}

var registeredCompressor = make(map[string]Compressor)

// RegisterCompressor registers the compressor with gRPC by its name. It can
// be activated when sending an RPC via grpc.UseCompressor(). It will be
// automatically accessed when receiving a message based on the content coding
// header. Servers also use it to send a response with the same encoding as
// the request.
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple Compressors are
// registered with the same name, the one registered last will take effect.
func RegisterCompressor(c Compressor) {
    registeredCompressor[c.Name()] = c
}

// GetCompressor returns Compressor for the given compressor name.
func GetCompressor(name string) Compressor {
    return registeredCompressor[name]
}

// Codec defines the interface gRPC uses to encode and decode messages. Note
// that implementations of this interface must be thread safe; a Codec's
// methods can be called from concurrent goroutines.
type Codec interface {
    // Marshal returns the wire format of v.
    Marshal(v interface{}) ([]byte, error)
    // Unmarshal parses the wire format into v.
    Unmarshal(data []byte, v interface{}) error
    // Name returns the name of the Codec implementation. The returned string
    // will be used as part of content type in transmission. The result must be
    // static; the result cannot change between calls.
    Name() string
}

var registeredCodecs = make(map[string]Codec)

// RegisterCodec registers the provided Codec for use with all gRPC clients and
// servers.
//
// The Codec will be stored and looked up by result of its Name() method, which
// should match the content-subtype of the encoding handled by the Codec. This
// is case-insensitive, and is stored and looked up as lowercase. If the
// result of calling Name() is an empty string, RegisterCodec will panic. See
// Content-Type on
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
// more details.
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple Compressors are
// registered with the same name, the one registered last will take effect.
func RegisterCodec(codec Codec) {
    if codec == nil {
        panic("cannot register a nil Codec")
    }
    contentSubtype := strings.ToLower(codec.Name())
    if contentSubtype == "" {
        panic("cannot register Codec with empty string result for String()")
    }
    registeredCodecs[contentSubtype] = codec
}

// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is
// registered for the content-subtype.
//
// The content-subtype is expected to be lowercase.
func GetCodec(contentSubtype string) Codec {
    return registeredCodecs[contentSubtype]
}
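To illustrate the registration contract above, a minimal custom Codec registered in an init() function. The package name "myjson", the codec name, and the use of encoding/json are assumptions for the example, not part of this change.

package myjson

import (
    "encoding/json"

    "google.golang.org/grpc/encoding"
)

// jsonCodec is a hypothetical Codec; its Name() result becomes the
// content-subtype under which it is stored and looked up (lowercased).
type jsonCodec struct{}

func (jsonCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (jsonCodec) Name() string                               { return "myjson" }

func init() {
    // Must run during initialization; RegisterCodec is not thread-safe.
    encoding.RegisterCodec(jsonCodec{})
}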
@ -0,0 +1,110 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package proto defines the protobuf codec. Importing this package will
|
||||
// register the codec.
|
||||
package proto
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/grpc/encoding"
|
||||
)
|
||||
|
||||
// Name is the name registered for the proto compressor.
|
||||
const Name = "proto"
|
||||
|
||||
func init() {
|
||||
encoding.RegisterCodec(codec{})
|
||||
}
|
||||
|
||||
// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
|
||||
type codec struct{}
|
||||
|
||||
type cachedProtoBuffer struct {
|
||||
lastMarshaledSize uint32
|
||||
proto.Buffer
|
||||
}
|
||||
|
||||
func capToMaxInt32(val int) uint32 {
|
||||
if val > math.MaxInt32 {
|
||||
return uint32(math.MaxInt32)
|
||||
}
|
||||
return uint32(val)
|
||||
}
|
||||
|
||||
func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
|
||||
protoMsg := v.(proto.Message)
|
||||
newSlice := make([]byte, 0, cb.lastMarshaledSize)
|
||||
|
||||
cb.SetBuf(newSlice)
|
||||
cb.Reset()
|
||||
if err := cb.Marshal(protoMsg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out := cb.Bytes()
|
||||
cb.lastMarshaledSize = capToMaxInt32(len(out))
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (codec) Marshal(v interface{}) ([]byte, error) {
|
||||
if pm, ok := v.(proto.Marshaler); ok {
|
||||
// object can marshal itself, no need for buffer
|
||||
return pm.Marshal()
|
||||
}
|
||||
|
||||
cb := protoBufferPool.Get().(*cachedProtoBuffer)
|
||||
out, err := marshal(v, cb)
|
||||
|
||||
// put back buffer and lose the ref to the slice
|
||||
cb.SetBuf(nil)
|
||||
protoBufferPool.Put(cb)
|
||||
return out, err
|
||||
}
|
||||
|
||||
func (codec) Unmarshal(data []byte, v interface{}) error {
|
||||
protoMsg := v.(proto.Message)
|
||||
protoMsg.Reset()
|
||||
|
||||
if pu, ok := protoMsg.(proto.Unmarshaler); ok {
|
||||
// object can unmarshal itself, no need for buffer
|
||||
return pu.Unmarshal(data)
|
||||
}
|
||||
|
||||
cb := protoBufferPool.Get().(*cachedProtoBuffer)
|
||||
cb.SetBuf(data)
|
||||
err := cb.Unmarshal(protoMsg)
|
||||
cb.SetBuf(nil)
|
||||
protoBufferPool.Put(cb)
|
||||
return err
|
||||
}
|
||||
|
||||
func (codec) Name() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
var protoBufferPool = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return &cachedProtoBuffer{
|
||||
Buffer: proto.Buffer{},
|
||||
lastMarshaledSize: 16,
|
||||
}
|
||||
},
|
||||
}
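// Illustrative sketch, not part of this change: fetching the codec registered
// above by name and round-tripping a message through it. The ptypes/duration
// import is only an assumption used to have a concrete proto.Message at hand.
package main

import (
	"fmt"

	durpb "github.com/golang/protobuf/ptypes/duration"
	"google.golang.org/grpc/encoding"
	_ "google.golang.org/grpc/encoding/proto" // registers the "proto" codec via init()
)

func main() {
	c := encoding.GetCodec("proto") // same value as the Name constant above
	in := &durpb.Duration{Seconds: 3, Nanos: 500}
	data, err := c.Marshal(in)
	if err != nil {
		panic(err)
	}
	out := new(durpb.Duration)
	if err := c.Unmarshal(data, out); err != nil {
		panic(err)
	}
	fmt.Println(out.Seconds, out.Nanos) // 3 500
}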
|
|
@@ -25,12 +25,11 @@ import (
|
|||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/internal/transport"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
// dialContext connects to the address on the named network.
|
||||
|
@@ -48,12 +47,16 @@ func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) erro
|
|||
|
||||
// toRPCErr converts an error into an error from the status package.
|
||||
func toRPCErr(err error) error {
|
||||
if err == nil || err == io.EOF {
|
||||
return err
|
||||
}
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
return status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
if _, ok := status.FromError(err); ok {
|
||||
return err
|
||||
}
|
||||
switch e := err.(type) {
|
||||
case transport.StreamError:
|
||||
return status.Error(e.Code, e.Desc)
|
||||
case transport.ConnectionError:
|
||||
return status.Error(codes.Unavailable, e.Desc)
|
||||
default:
|
||||
|
@@ -62,37 +65,7 @@ func toRPCErr(err error) error {
|
|||
return status.Error(codes.DeadlineExceeded, err.Error())
|
||||
case context.Canceled:
|
||||
return status.Error(codes.Canceled, err.Error())
|
||||
case ErrClientConnClosing:
|
||||
return status.Error(codes.FailedPrecondition, err.Error())
|
||||
}
|
||||
}
|
||||
return status.Error(codes.Unknown, err.Error())
|
||||
}
|
||||
|
||||
// convertCode converts a standard Go error into its canonical code. Note that
|
||||
// this is only used to translate the error returned by the server applications.
|
||||
func convertCode(err error) codes.Code {
|
||||
switch err {
|
||||
case nil:
|
||||
return codes.OK
|
||||
case io.EOF:
|
||||
return codes.OutOfRange
|
||||
case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
|
||||
return codes.FailedPrecondition
|
||||
case os.ErrInvalid:
|
||||
return codes.InvalidArgument
|
||||
case context.Canceled:
|
||||
return codes.Canceled
|
||||
case context.DeadlineExceeded:
|
||||
return codes.DeadlineExceeded
|
||||
}
|
||||
switch {
|
||||
case os.IsExist(err):
|
||||
return codes.AlreadyExists
|
||||
case os.IsNotExist(err):
|
||||
return codes.NotFound
|
||||
case os.IsPermission(err):
|
||||
return codes.PermissionDenied
|
||||
}
|
||||
return codes.Unknown
|
||||
}
|
||||
|
|
|
@@ -22,15 +22,15 @@ package grpc
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
netctx "golang.org/x/net/context"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/internal/transport"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
// dialContext connects to the address on the named network.
|
||||
|
@@ -41,19 +41,23 @@ func dialContext(ctx context.Context, network, address string) (net.Conn, error)
|
|||
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
|
||||
req = req.WithContext(ctx)
|
||||
if err := req.Write(conn); err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to write the HTTP request: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// toRPCErr converts an error into an error from the status package.
|
||||
func toRPCErr(err error) error {
|
||||
if err == nil || err == io.EOF {
|
||||
return err
|
||||
}
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
return status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
if _, ok := status.FromError(err); ok {
|
||||
return err
|
||||
}
|
||||
switch e := err.(type) {
|
||||
case transport.StreamError:
|
||||
return status.Error(e.Code, e.Desc)
|
||||
case transport.ConnectionError:
|
||||
return status.Error(codes.Unavailable, e.Desc)
|
||||
default:
|
||||
|
@@ -62,37 +66,7 @@ func toRPCErr(err error) error {
|
|||
return status.Error(codes.DeadlineExceeded, err.Error())
|
||||
case context.Canceled, netctx.Canceled:
|
||||
return status.Error(codes.Canceled, err.Error())
|
||||
case ErrClientConnClosing:
|
||||
return status.Error(codes.FailedPrecondition, err.Error())
|
||||
}
|
||||
}
|
||||
return status.Error(codes.Unknown, err.Error())
|
||||
}
|
||||
|
||||
// convertCode converts a standard Go error into its canonical code. Note that
|
||||
// this is only used to translate the error returned by the server applications.
|
||||
func convertCode(err error) codes.Code {
|
||||
switch err {
|
||||
case nil:
|
||||
return codes.OK
|
||||
case io.EOF:
|
||||
return codes.OutOfRange
|
||||
case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
|
||||
return codes.FailedPrecondition
|
||||
case os.ErrInvalid:
|
||||
return codes.InvalidArgument
|
||||
case context.Canceled, netctx.Canceled:
|
||||
return codes.Canceled
|
||||
case context.DeadlineExceeded, netctx.DeadlineExceeded:
|
||||
return codes.DeadlineExceeded
|
||||
}
|
||||
switch {
|
||||
case os.IsExist(err):
|
||||
return codes.AlreadyExists
|
||||
case os.IsNotExist(err):
|
||||
return codes.NotFound
|
||||
case os.IsPermission(err):
|
||||
return codes.PermissionDenied
|
||||
}
|
||||
return codes.Unknown
|
||||
}
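// Illustrative sketch, not part of this change: how the two helpers above map
// plain Go errors onto gRPC status codes. Written as if inside package grpc,
// since both functions are unexported.
fmt.Println(convertCode(io.EOF))                   // OutOfRange
fmt.Println(convertCode(os.ErrInvalid))            // InvalidArgument
fmt.Println(convertCode(context.DeadlineExceeded)) // DeadlineExceeded
if st, ok := status.FromError(toRPCErr(context.Canceled)); ok {
	fmt.Println(st.Code()) // Canceled
}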
|
||||
|
|
|
@@ -1,737 +0,0 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2016 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/codes"
|
||||
lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/naming"
|
||||
)
|
||||
|
||||
// Client API for LoadBalancer service.
|
||||
// Mostly copied from the generated pb.go file to avoid a circular dependency.
|
||||
type loadBalancerClient struct {
|
||||
cc *ClientConn
|
||||
}
|
||||
|
||||
func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption) (*balanceLoadClientStream, error) {
|
||||
desc := &StreamDesc{
|
||||
StreamName: "BalanceLoad",
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
}
|
||||
stream, err := NewClientStream(ctx, desc, c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &balanceLoadClientStream{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type balanceLoadClientStream struct {
|
||||
ClientStream
|
||||
}
|
||||
|
||||
func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) {
|
||||
m := new(lbpb.LoadBalanceResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// NewGRPCLBBalancer creates a grpclb load balancer.
|
||||
func NewGRPCLBBalancer(r naming.Resolver) Balancer {
|
||||
return &balancer{
|
||||
r: r,
|
||||
}
|
||||
}
|
||||
|
||||
type remoteBalancerInfo struct {
|
||||
addr string
|
||||
// the server name used for authentication with the remote LB server.
|
||||
name string
|
||||
}
|
||||
|
||||
// grpclbAddrInfo consists of the information of a backend server.
|
||||
type grpclbAddrInfo struct {
|
||||
addr Address
|
||||
connected bool
|
||||
// dropForRateLimiting indicates whether this particular request should be
|
||||
// dropped by the client for rate limiting.
|
||||
dropForRateLimiting bool
|
||||
// dropForLoadBalancing indicates whether this particular request should be
|
||||
// dropped by the client for load balancing.
|
||||
dropForLoadBalancing bool
|
||||
}
|
||||
|
||||
type balancer struct {
|
||||
r naming.Resolver
|
||||
target string
|
||||
mu sync.Mutex
|
||||
seq int // a sequence number to make sure addrCh does not get stale addresses.
|
||||
w naming.Watcher
|
||||
addrCh chan []Address
|
||||
rbs []remoteBalancerInfo
|
||||
addrs []*grpclbAddrInfo
|
||||
next int
|
||||
waitCh chan struct{}
|
||||
done bool
|
||||
expTimer *time.Timer
|
||||
rand *rand.Rand
|
||||
|
||||
clientStats lbpb.ClientStats
|
||||
}
|
||||
|
||||
func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error {
|
||||
updates, err := w.Next()
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpclb: failed to get next addr update from watcher: %v", err)
|
||||
return err
|
||||
}
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.done {
|
||||
return ErrClientConnClosing
|
||||
}
|
||||
for _, update := range updates {
|
||||
switch update.Op {
|
||||
case naming.Add:
|
||||
var exist bool
|
||||
for _, v := range b.rbs {
|
||||
// TODO: Is the same addr with different server name a different balancer?
|
||||
if update.Addr == v.addr {
|
||||
exist = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if exist {
|
||||
continue
|
||||
}
|
||||
md, ok := update.Metadata.(*naming.AddrMetadataGRPCLB)
|
||||
if !ok {
|
||||
// TODO: Revisit the handling here and possibly introduce some fallback mechanism.
|
||||
grpclog.Errorf("The name resolution contains unexpected metadata %v", update.Metadata)
|
||||
continue
|
||||
}
|
||||
switch md.AddrType {
|
||||
case naming.Backend:
|
||||
// TODO: Revisit the handling here and possibly introduce some fallback mechanism.
|
||||
grpclog.Errorf("The name resolution does not give grpclb addresses")
|
||||
continue
|
||||
case naming.GRPCLB:
|
||||
b.rbs = append(b.rbs, remoteBalancerInfo{
|
||||
addr: update.Addr,
|
||||
name: md.ServerName,
|
||||
})
|
||||
default:
|
||||
grpclog.Errorf("Received unknow address type %d", md.AddrType)
|
||||
continue
|
||||
}
|
||||
case naming.Delete:
|
||||
for i, v := range b.rbs {
|
||||
if update.Addr == v.addr {
|
||||
copy(b.rbs[i:], b.rbs[i+1:])
|
||||
b.rbs = b.rbs[:len(b.rbs)-1]
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
grpclog.Errorf("Unknown update.Op %v", update.Op)
|
||||
}
|
||||
}
|
||||
// TODO: Fall back to the basic round-robin load balancing if the resulting address is
|
||||
// not a load balancer.
|
||||
select {
|
||||
case <-ch:
|
||||
default:
|
||||
}
|
||||
ch <- b.rbs
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *balancer) serverListExpire(seq int) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
// TODO: gRPC internals do not clear the connections when the server list is stale.
|
||||
// This means RPCs will keep using the existing server list until b receives new
|
||||
// server list even though the list is expired. Revisit this behavior later.
|
||||
if b.done || seq < b.seq {
|
||||
return
|
||||
}
|
||||
b.next = 0
|
||||
b.addrs = nil
|
||||
// Ask grpc internals to close all the corresponding connections.
|
||||
b.addrCh <- nil
|
||||
}
|
||||
|
||||
func convertDuration(d *lbpb.Duration) time.Duration {
|
||||
if d == nil {
|
||||
return 0
|
||||
}
|
||||
return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
|
||||
}
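// Illustrative sketch, not part of this change: convertDuration flattens the
// proto Duration into a native time.Duration, e.g.
//   convertDuration(&lbpb.Duration{Seconds: 1, Nanos: 500000000}) == 1500*time.Millisecond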
|
||||
|
||||
func (b *balancer) processServerList(l *lbpb.ServerList, seq int) {
|
||||
if l == nil {
|
||||
return
|
||||
}
|
||||
servers := l.GetServers()
|
||||
expiration := convertDuration(l.GetExpirationInterval())
|
||||
var (
|
||||
sl []*grpclbAddrInfo
|
||||
addrs []Address
|
||||
)
|
||||
for _, s := range servers {
|
||||
md := metadata.Pairs("lb-token", s.LoadBalanceToken)
|
||||
ip := net.IP(s.IpAddress)
|
||||
ipStr := ip.String()
|
||||
if ip.To4() == nil {
|
||||
// Add square brackets to ipv6 addresses, otherwise net.Dial() and
|
||||
// net.SplitHostPort() will return a "too many colons" error.
|
||||
ipStr = fmt.Sprintf("[%s]", ipStr)
|
||||
}
|
||||
addr := Address{
|
||||
Addr: fmt.Sprintf("%s:%d", ipStr, s.Port),
|
||||
Metadata: &md,
|
||||
}
|
||||
sl = append(sl, &grpclbAddrInfo{
|
||||
addr: addr,
|
||||
dropForRateLimiting: s.DropForRateLimiting,
|
||||
dropForLoadBalancing: s.DropForLoadBalancing,
|
||||
})
|
||||
addrs = append(addrs, addr)
|
||||
}
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.done || seq < b.seq {
|
||||
return
|
||||
}
|
||||
if len(sl) > 0 {
|
||||
// reset b.next to 0 when replacing the server list.
|
||||
b.next = 0
|
||||
b.addrs = sl
|
||||
b.addrCh <- addrs
|
||||
if b.expTimer != nil {
|
||||
b.expTimer.Stop()
|
||||
b.expTimer = nil
|
||||
}
|
||||
if expiration > 0 {
|
||||
b.expTimer = time.AfterFunc(expiration, func() {
|
||||
b.serverListExpire(seq)
|
||||
})
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (b *balancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) {
|
||||
ticker := time.NewTicker(interval)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
case <-done:
|
||||
return
|
||||
}
|
||||
b.mu.Lock()
|
||||
stats := b.clientStats
|
||||
b.clientStats = lbpb.ClientStats{} // Clear the stats.
|
||||
b.mu.Unlock()
|
||||
t := time.Now()
|
||||
stats.Timestamp = &lbpb.Timestamp{
|
||||
Seconds: t.Unix(),
|
||||
Nanos: int32(t.Nanosecond()),
|
||||
}
|
||||
if err := s.Send(&lbpb.LoadBalanceRequest{
|
||||
LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{
|
||||
ClientStats: &stats,
|
||||
},
|
||||
}); err != nil {
|
||||
grpclog.Errorf("grpclb: failed to send load report: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
stream, err := lbc.BalanceLoad(ctx)
|
||||
if err != nil {
|
||||
grpclog.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
|
||||
return
|
||||
}
|
||||
b.mu.Lock()
|
||||
if b.done {
|
||||
b.mu.Unlock()
|
||||
return
|
||||
}
|
||||
b.mu.Unlock()
|
||||
initReq := &lbpb.LoadBalanceRequest{
|
||||
LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
|
||||
InitialRequest: &lbpb.InitialLoadBalanceRequest{
|
||||
Name: b.target,
|
||||
},
|
||||
},
|
||||
}
|
||||
if err := stream.Send(initReq); err != nil {
|
||||
grpclog.Errorf("grpclb: failed to send init request: %v", err)
|
||||
// TODO: backoff on retry?
|
||||
return true
|
||||
}
|
||||
reply, err := stream.Recv()
|
||||
if err != nil {
|
||||
grpclog.Errorf("grpclb: failed to recv init response: %v", err)
|
||||
// TODO: backoff on retry?
|
||||
return true
|
||||
}
|
||||
initResp := reply.GetInitialResponse()
|
||||
if initResp == nil {
|
||||
grpclog.Errorf("grpclb: reply from remote balancer did not include initial response.")
|
||||
return
|
||||
}
|
||||
// TODO: Support delegation.
|
||||
if initResp.LoadBalancerDelegate != "" {
|
||||
// delegation
|
||||
grpclog.Errorf("TODO: Delegation is not supported yet.")
|
||||
return
|
||||
}
|
||||
streamDone := make(chan struct{})
|
||||
defer close(streamDone)
|
||||
b.mu.Lock()
|
||||
b.clientStats = lbpb.ClientStats{} // Clear client stats.
|
||||
b.mu.Unlock()
|
||||
if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
|
||||
go b.sendLoadReport(stream, d, streamDone)
|
||||
}
|
||||
// Retrieve the server list.
|
||||
for {
|
||||
reply, err := stream.Recv()
|
||||
if err != nil {
|
||||
grpclog.Errorf("grpclb: failed to recv server list: %v", err)
|
||||
break
|
||||
}
|
||||
b.mu.Lock()
|
||||
if b.done || seq < b.seq {
|
||||
b.mu.Unlock()
|
||||
return
|
||||
}
|
||||
b.seq++ // tick when receiving a new list of servers.
|
||||
seq = b.seq
|
||||
b.mu.Unlock()
|
||||
if serverList := reply.GetServerList(); serverList != nil {
|
||||
b.processServerList(serverList, seq)
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *balancer) Start(target string, config BalancerConfig) error {
|
||||
b.rand = rand.New(rand.NewSource(time.Now().Unix()))
|
||||
// TODO: Fall back to the basic direct connection if there is no name resolver.
|
||||
if b.r == nil {
|
||||
return errors.New("there is no name resolver installed")
|
||||
}
|
||||
b.target = target
|
||||
b.mu.Lock()
|
||||
if b.done {
|
||||
b.mu.Unlock()
|
||||
return ErrClientConnClosing
|
||||
}
|
||||
b.addrCh = make(chan []Address)
|
||||
w, err := b.r.Resolve(target)
|
||||
if err != nil {
|
||||
b.mu.Unlock()
|
||||
grpclog.Errorf("grpclb: failed to resolve address: %v, err: %v", target, err)
|
||||
return err
|
||||
}
|
||||
b.w = w
|
||||
b.mu.Unlock()
|
||||
balancerAddrsCh := make(chan []remoteBalancerInfo, 1)
|
||||
// Spawn a goroutine to monitor the name resolution of remote load balancer.
|
||||
go func() {
|
||||
for {
|
||||
if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil {
|
||||
grpclog.Warningf("grpclb: the naming watcher stops working due to %v.\n", err)
|
||||
close(balancerAddrsCh)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
// Spawn a goroutine to talk to the remote load balancer.
|
||||
go func() {
|
||||
var (
|
||||
cc *ClientConn
|
||||
// ccError is closed when there is an error in the current cc.
|
||||
// A new rb should be picked from rbs and connected.
|
||||
ccError chan struct{}
|
||||
rb *remoteBalancerInfo
|
||||
rbs []remoteBalancerInfo
|
||||
rbIdx int
|
||||
)
|
||||
|
||||
defer func() {
|
||||
if ccError != nil {
|
||||
select {
|
||||
case <-ccError:
|
||||
default:
|
||||
close(ccError)
|
||||
}
|
||||
}
|
||||
if cc != nil {
|
||||
cc.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
var ok bool
|
||||
select {
|
||||
case rbs, ok = <-balancerAddrsCh:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
foundIdx := -1
|
||||
if rb != nil {
|
||||
for i, trb := range rbs {
|
||||
if trb == *rb {
|
||||
foundIdx = i
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if foundIdx >= 0 {
|
||||
if foundIdx >= 1 {
|
||||
// Move the address in use to the beginning of the list.
|
||||
b.rbs[0], b.rbs[foundIdx] = b.rbs[foundIdx], b.rbs[0]
|
||||
rbIdx = 0
|
||||
}
|
||||
continue // If found, don't dial new cc.
|
||||
} else if len(rbs) > 0 {
|
||||
// Pick a random one from the list, instead of always using the first one.
|
||||
if l := len(rbs); l > 1 && rb != nil {
|
||||
tmpIdx := b.rand.Intn(l - 1)
|
||||
b.rbs[0], b.rbs[tmpIdx] = b.rbs[tmpIdx], b.rbs[0]
|
||||
}
|
||||
rbIdx = 0
|
||||
rb = &rbs[0]
|
||||
} else {
|
||||
// foundIdx < 0 && len(rbs) <= 0.
|
||||
rb = nil
|
||||
}
|
||||
case <-ccError:
|
||||
ccError = nil
|
||||
if rbIdx < len(rbs)-1 {
|
||||
rbIdx++
|
||||
rb = &rbs[rbIdx]
|
||||
} else {
|
||||
rb = nil
|
||||
}
|
||||
}
|
||||
|
||||
if rb == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if cc != nil {
|
||||
cc.Close()
|
||||
}
|
||||
// Talk to the remote load balancer to get the server list.
|
||||
var (
|
||||
err error
|
||||
dopts []DialOption
|
||||
)
|
||||
if creds := config.DialCreds; creds != nil {
|
||||
if rb.name != "" {
|
||||
if err := creds.OverrideServerName(rb.name); err != nil {
|
||||
grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
dopts = append(dopts, WithTransportCredentials(creds))
|
||||
} else {
|
||||
dopts = append(dopts, WithInsecure())
|
||||
}
|
||||
if dialer := config.Dialer; dialer != nil {
|
||||
// WithDialer takes a different type of function, so we instead use a special DialOption here.
|
||||
dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer })
|
||||
}
|
||||
ccError = make(chan struct{})
|
||||
cc, err = Dial(rb.addr, dopts...)
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err)
|
||||
close(ccError)
|
||||
continue
|
||||
}
|
||||
b.mu.Lock()
|
||||
b.seq++ // tick when getting a new balancer address
|
||||
seq := b.seq
|
||||
b.next = 0
|
||||
b.mu.Unlock()
|
||||
go func(cc *ClientConn, ccError chan struct{}) {
|
||||
lbc := &loadBalancerClient{cc}
|
||||
b.callRemoteBalancer(lbc, seq)
|
||||
cc.Close()
|
||||
select {
|
||||
case <-ccError:
|
||||
default:
|
||||
close(ccError)
|
||||
}
|
||||
}(cc, ccError)
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *balancer) down(addr Address, err error) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
for _, a := range b.addrs {
|
||||
if addr == a.addr {
|
||||
a.connected = false
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *balancer) Up(addr Address) func(error) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.done {
|
||||
return nil
|
||||
}
|
||||
var cnt int
|
||||
for _, a := range b.addrs {
|
||||
if a.addr == addr {
|
||||
if a.connected {
|
||||
return nil
|
||||
}
|
||||
a.connected = true
|
||||
}
|
||||
if a.connected && !a.dropForRateLimiting && !a.dropForLoadBalancing {
|
||||
cnt++
|
||||
}
|
||||
}
|
||||
// addr is the only one which is connected. Notify the Get() callers who are blocking.
|
||||
if cnt == 1 && b.waitCh != nil {
|
||||
close(b.waitCh)
|
||||
b.waitCh = nil
|
||||
}
|
||||
return func(err error) {
|
||||
b.down(addr, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
|
||||
var ch chan struct{}
|
||||
b.mu.Lock()
|
||||
if b.done {
|
||||
b.mu.Unlock()
|
||||
err = ErrClientConnClosing
|
||||
return
|
||||
}
|
||||
seq := b.seq
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
put = func() {
|
||||
s, ok := rpcInfoFromContext(ctx)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.done || seq < b.seq {
|
||||
return
|
||||
}
|
||||
b.clientStats.NumCallsFinished++
|
||||
if !s.bytesSent {
|
||||
b.clientStats.NumCallsFinishedWithClientFailedToSend++
|
||||
} else if s.bytesReceived {
|
||||
b.clientStats.NumCallsFinishedKnownReceived++
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
b.clientStats.NumCallsStarted++
|
||||
if len(b.addrs) > 0 {
|
||||
if b.next >= len(b.addrs) {
|
||||
b.next = 0
|
||||
}
|
||||
next := b.next
|
||||
for {
|
||||
a := b.addrs[next]
|
||||
next = (next + 1) % len(b.addrs)
|
||||
if a.connected {
|
||||
if !a.dropForRateLimiting && !a.dropForLoadBalancing {
|
||||
addr = a.addr
|
||||
b.next = next
|
||||
b.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if !opts.BlockingWait {
|
||||
b.next = next
|
||||
if a.dropForLoadBalancing {
|
||||
b.clientStats.NumCallsFinished++
|
||||
b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
|
||||
} else if a.dropForRateLimiting {
|
||||
b.clientStats.NumCallsFinished++
|
||||
b.clientStats.NumCallsFinishedWithDropForRateLimiting++
|
||||
}
|
||||
b.mu.Unlock()
|
||||
err = Errorf(codes.Unavailable, "%s drops requests", a.addr.Addr)
|
||||
return
|
||||
}
|
||||
}
|
||||
if next == b.next {
|
||||
// Has iterated over all the possible addresses but none is connected.
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !opts.BlockingWait {
|
||||
if len(b.addrs) == 0 {
|
||||
b.clientStats.NumCallsFinished++
|
||||
b.clientStats.NumCallsFinishedWithClientFailedToSend++
|
||||
b.mu.Unlock()
|
||||
err = Errorf(codes.Unavailable, "there is no address available")
|
||||
return
|
||||
}
|
||||
// Returns the next addr on b.addrs for a failfast RPC.
|
||||
addr = b.addrs[b.next].addr
|
||||
b.next++
|
||||
b.mu.Unlock()
|
||||
return
|
||||
}
|
||||
// Wait on b.waitCh for non-failfast RPCs.
|
||||
if b.waitCh == nil {
|
||||
ch = make(chan struct{})
|
||||
b.waitCh = ch
|
||||
} else {
|
||||
ch = b.waitCh
|
||||
}
|
||||
b.mu.Unlock()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
b.mu.Lock()
|
||||
b.clientStats.NumCallsFinished++
|
||||
b.clientStats.NumCallsFinishedWithClientFailedToSend++
|
||||
b.mu.Unlock()
|
||||
err = ctx.Err()
|
||||
return
|
||||
case <-ch:
|
||||
b.mu.Lock()
|
||||
if b.done {
|
||||
b.clientStats.NumCallsFinished++
|
||||
b.clientStats.NumCallsFinishedWithClientFailedToSend++
|
||||
b.mu.Unlock()
|
||||
err = ErrClientConnClosing
|
||||
return
|
||||
}
|
||||
|
||||
if len(b.addrs) > 0 {
|
||||
if b.next >= len(b.addrs) {
|
||||
b.next = 0
|
||||
}
|
||||
next := b.next
|
||||
for {
|
||||
a := b.addrs[next]
|
||||
next = (next + 1) % len(b.addrs)
|
||||
if a.connected {
|
||||
if !a.dropForRateLimiting && !a.dropForLoadBalancing {
|
||||
addr = a.addr
|
||||
b.next = next
|
||||
b.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if !opts.BlockingWait {
|
||||
b.next = next
|
||||
if a.dropForLoadBalancing {
|
||||
b.clientStats.NumCallsFinished++
|
||||
b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
|
||||
} else if a.dropForRateLimiting {
|
||||
b.clientStats.NumCallsFinished++
|
||||
b.clientStats.NumCallsFinishedWithDropForRateLimiting++
|
||||
}
|
||||
b.mu.Unlock()
|
||||
err = Errorf(codes.Unavailable, "drop requests for the addreess %s", a.addr.Addr)
|
||||
return
|
||||
}
|
||||
}
|
||||
if next == b.next {
|
||||
// Has iterated over all the possible addresses but none is connected.
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
// The newly added addr got removed by Down() again.
|
||||
if b.waitCh == nil {
|
||||
ch = make(chan struct{})
|
||||
b.waitCh = ch
|
||||
} else {
|
||||
ch = b.waitCh
|
||||
}
|
||||
b.mu.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *balancer) Notify() <-chan []Address {
|
||||
return b.addrCh
|
||||
}
|
||||
|
||||
func (b *balancer) Close() error {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.done {
|
||||
return errBalancerClosed
|
||||
}
|
||||
b.done = true
|
||||
if b.expTimer != nil {
|
||||
b.expTimer.Stop()
|
||||
}
|
||||
if b.waitCh != nil {
|
||||
close(b.waitCh)
|
||||
}
|
||||
if b.addrCh != nil {
|
||||
close(b.addrCh)
|
||||
}
|
||||
if b.w != nil {
|
||||
b.w.Close()
|
||||
}
|
||||
return nil
|
||||
}
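// Illustrative sketch, not part of this change: how the balancer defined in
// this (removed) file used to be plugged into a ClientConn through the older
// Balancer API, which has since been deprecated. The target address is a
// placeholder assumed for the example.
r, err := naming.NewDNSResolver()
if err != nil {
	// handle resolver construction error
}
conn, err := grpc.Dial(
	"my-service.example.com:443", // assumed target
	grpc.WithBalancer(grpc.NewGRPCLBBalancer(r)),
	grpc.WithInsecure(),
)
_ = conn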
|
|
@@ -1,629 +0,0 @@
|
|||
// Code generated by protoc-gen-go.
|
||||
// source: grpclb.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package grpc_lb_v1 is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
grpclb.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Duration
|
||||
Timestamp
|
||||
LoadBalanceRequest
|
||||
InitialLoadBalanceRequest
|
||||
ClientStats
|
||||
LoadBalanceResponse
|
||||
InitialLoadBalanceResponse
|
||||
ServerList
|
||||
Server
|
||||
*/
|
||||
package grpc_lb_v1
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type Duration struct {
|
||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||
// to +315,576,000,000 inclusive.
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
|
||||
// Signed fractions of a second at nanosecond resolution of the span
|
||||
// of time. Durations less than one second are represented with a 0
|
||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
||||
// of one second or more, a non-zero value for the `nanos` field must be
|
||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
||||
// to +999,999,999 inclusive.
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Duration) Reset() { *m = Duration{} }
|
||||
func (m *Duration) String() string { return proto.CompactTextString(m) }
|
||||
func (*Duration) ProtoMessage() {}
|
||||
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
|
||||
func (m *Duration) GetSeconds() int64 {
|
||||
if m != nil {
|
||||
return m.Seconds
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Duration) GetNanos() int32 {
|
||||
if m != nil {
|
||||
return m.Nanos
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type Timestamp struct {
|
||||
// Represents seconds of UTC time since Unix epoch
|
||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||
// 9999-12-31T23:59:59Z inclusive.
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
|
||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||
// second values with fractions must still have non-negative nanos values
|
||||
// that count forward in time. Must be from 0 to 999,999,999
|
||||
// inclusive.
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Timestamp) Reset() { *m = Timestamp{} }
|
||||
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
|
||||
func (*Timestamp) ProtoMessage() {}
|
||||
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||
|
||||
func (m *Timestamp) GetSeconds() int64 {
|
||||
if m != nil {
|
||||
return m.Seconds
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Timestamp) GetNanos() int32 {
|
||||
if m != nil {
|
||||
return m.Nanos
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type LoadBalanceRequest struct {
|
||||
// Types that are valid to be assigned to LoadBalanceRequestType:
|
||||
// *LoadBalanceRequest_InitialRequest
|
||||
// *LoadBalanceRequest_ClientStats
|
||||
LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"`
|
||||
}
|
||||
|
||||
func (m *LoadBalanceRequest) Reset() { *m = LoadBalanceRequest{} }
|
||||
func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*LoadBalanceRequest) ProtoMessage() {}
|
||||
func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
||||
|
||||
type isLoadBalanceRequest_LoadBalanceRequestType interface {
|
||||
isLoadBalanceRequest_LoadBalanceRequestType()
|
||||
}
|
||||
|
||||
type LoadBalanceRequest_InitialRequest struct {
|
||||
InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,oneof"`
|
||||
}
|
||||
type LoadBalanceRequest_ClientStats struct {
|
||||
ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,oneof"`
|
||||
}
|
||||
|
||||
func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {}
|
||||
func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {}
|
||||
|
||||
func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType {
|
||||
if m != nil {
|
||||
return m.LoadBalanceRequestType
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest {
|
||||
if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok {
|
||||
return x.InitialRequest
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *LoadBalanceRequest) GetClientStats() *ClientStats {
|
||||
if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok {
|
||||
return x.ClientStats
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// XXX_OneofFuncs is for the internal use of the proto package.
|
||||
func (*LoadBalanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
|
||||
return _LoadBalanceRequest_OneofMarshaler, _LoadBalanceRequest_OneofUnmarshaler, _LoadBalanceRequest_OneofSizer, []interface{}{
|
||||
(*LoadBalanceRequest_InitialRequest)(nil),
|
||||
(*LoadBalanceRequest_ClientStats)(nil),
|
||||
}
|
||||
}
|
||||
|
||||
func _LoadBalanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
|
||||
m := msg.(*LoadBalanceRequest)
|
||||
// load_balance_request_type
|
||||
switch x := m.LoadBalanceRequestType.(type) {
|
||||
case *LoadBalanceRequest_InitialRequest:
|
||||
b.EncodeVarint(1<<3 | proto.WireBytes)
|
||||
if err := b.EncodeMessage(x.InitialRequest); err != nil {
|
||||
return err
|
||||
}
|
||||
case *LoadBalanceRequest_ClientStats:
|
||||
b.EncodeVarint(2<<3 | proto.WireBytes)
|
||||
if err := b.EncodeMessage(x.ClientStats); err != nil {
|
||||
return err
|
||||
}
|
||||
case nil:
|
||||
default:
|
||||
return fmt.Errorf("LoadBalanceRequest.LoadBalanceRequestType has unexpected type %T", x)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _LoadBalanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
|
||||
m := msg.(*LoadBalanceRequest)
|
||||
switch tag {
|
||||
case 1: // load_balance_request_type.initial_request
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(InitialLoadBalanceRequest)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.LoadBalanceRequestType = &LoadBalanceRequest_InitialRequest{msg}
|
||||
return true, err
|
||||
case 2: // load_balance_request_type.client_stats
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(ClientStats)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.LoadBalanceRequestType = &LoadBalanceRequest_ClientStats{msg}
|
||||
return true, err
|
||||
default:
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) {
|
||||
m := msg.(*LoadBalanceRequest)
|
||||
// load_balance_request_type
|
||||
switch x := m.LoadBalanceRequestType.(type) {
|
||||
case *LoadBalanceRequest_InitialRequest:
|
||||
s := proto.Size(x.InitialRequest)
|
||||
n += proto.SizeVarint(1<<3 | proto.WireBytes)
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *LoadBalanceRequest_ClientStats:
|
||||
s := proto.Size(x.ClientStats)
|
||||
n += proto.SizeVarint(2<<3 | proto.WireBytes)
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case nil:
|
||||
default:
|
||||
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
type InitialLoadBalanceRequest struct {
|
||||
// Name of the load balanced service (e.g., balancer.service.com);
|
||||
// length should be less than 256 bytes.
|
||||
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||
}
|
||||
|
||||
func (m *InitialLoadBalanceRequest) Reset() { *m = InitialLoadBalanceRequest{} }
|
||||
func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*InitialLoadBalanceRequest) ProtoMessage() {}
|
||||
func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
|
||||
|
||||
func (m *InitialLoadBalanceRequest) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
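// Illustrative sketch, not part of this change: populating the oneof defined
// above. A client sends this as the first message on the BalanceLoad stream;
// the service name is a placeholder.
req := &LoadBalanceRequest{
	LoadBalanceRequestType: &LoadBalanceRequest_InitialRequest{
		InitialRequest: &InitialLoadBalanceRequest{Name: "my-service.example.com"},
	},
}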
|
||||
|
||||
// Contains client level statistics that are useful to load balancing. Each
|
||||
// count except the timestamp should be reset to zero after reporting the stats.
|
||||
type ClientStats struct {
|
||||
// The timestamp of generating the report.
|
||||
Timestamp *Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"`
|
||||
// The total number of RPCs that started.
|
||||
NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted" json:"num_calls_started,omitempty"`
|
||||
// The total number of RPCs that finished.
|
||||
NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished" json:"num_calls_finished,omitempty"`
|
||||
// The total number of RPCs that were dropped by the client because of rate
|
||||
// limiting.
|
||||
NumCallsFinishedWithDropForRateLimiting int64 `protobuf:"varint,4,opt,name=num_calls_finished_with_drop_for_rate_limiting,json=numCallsFinishedWithDropForRateLimiting" json:"num_calls_finished_with_drop_for_rate_limiting,omitempty"`
|
||||
// The total number of RPCs that were dropped by the client because of load
|
||||
// balancing.
|
||||
NumCallsFinishedWithDropForLoadBalancing int64 `protobuf:"varint,5,opt,name=num_calls_finished_with_drop_for_load_balancing,json=numCallsFinishedWithDropForLoadBalancing" json:"num_calls_finished_with_drop_for_load_balancing,omitempty"`
|
||||
// The total number of RPCs that failed to reach a server except dropped RPCs.
|
||||
NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend" json:"num_calls_finished_with_client_failed_to_send,omitempty"`
|
||||
// The total number of RPCs that finished and are known to have been received
|
||||
// by a server.
|
||||
NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived" json:"num_calls_finished_known_received,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ClientStats) Reset() { *m = ClientStats{} }
|
||||
func (m *ClientStats) String() string { return proto.CompactTextString(m) }
|
||||
func (*ClientStats) ProtoMessage() {}
|
||||
func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
|
||||
|
||||
func (m *ClientStats) GetTimestamp() *Timestamp {
|
||||
if m != nil {
|
||||
return m.Timestamp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ClientStats) GetNumCallsStarted() int64 {
|
||||
if m != nil {
|
||||
return m.NumCallsStarted
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ClientStats) GetNumCallsFinished() int64 {
|
||||
if m != nil {
|
||||
return m.NumCallsFinished
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ClientStats) GetNumCallsFinishedWithDropForRateLimiting() int64 {
|
||||
if m != nil {
|
||||
return m.NumCallsFinishedWithDropForRateLimiting
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ClientStats) GetNumCallsFinishedWithDropForLoadBalancing() int64 {
|
||||
if m != nil {
|
||||
return m.NumCallsFinishedWithDropForLoadBalancing
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 {
|
||||
if m != nil {
|
||||
return m.NumCallsFinishedWithClientFailedToSend
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 {
|
||||
if m != nil {
|
||||
return m.NumCallsFinishedKnownReceived
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type LoadBalanceResponse struct {
|
||||
// Types that are valid to be assigned to LoadBalanceResponseType:
|
||||
// *LoadBalanceResponse_InitialResponse
|
||||
// *LoadBalanceResponse_ServerList
|
||||
LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"`
|
||||
}
|
||||
|
||||
func (m *LoadBalanceResponse) Reset() { *m = LoadBalanceResponse{} }
|
||||
func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*LoadBalanceResponse) ProtoMessage() {}
|
||||
func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
|
||||
|
||||
type isLoadBalanceResponse_LoadBalanceResponseType interface {
|
||||
isLoadBalanceResponse_LoadBalanceResponseType()
|
||||
}
|
||||
|
||||
type LoadBalanceResponse_InitialResponse struct {
|
||||
InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,oneof"`
|
||||
}
|
||||
type LoadBalanceResponse_ServerList struct {
|
||||
ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,oneof"`
|
||||
}
|
||||
|
||||
func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {}
|
||||
func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {}
|
||||
|
||||
func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType {
|
||||
if m != nil {
|
||||
return m.LoadBalanceResponseType
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse {
|
||||
if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok {
|
||||
return x.InitialResponse
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *LoadBalanceResponse) GetServerList() *ServerList {
|
||||
if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok {
|
||||
return x.ServerList
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// XXX_OneofFuncs is for the internal use of the proto package.
|
||||
func (*LoadBalanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
|
||||
return _LoadBalanceResponse_OneofMarshaler, _LoadBalanceResponse_OneofUnmarshaler, _LoadBalanceResponse_OneofSizer, []interface{}{
|
||||
(*LoadBalanceResponse_InitialResponse)(nil),
|
||||
(*LoadBalanceResponse_ServerList)(nil),
|
||||
}
|
||||
}
|
||||
|
||||
func _LoadBalanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
|
||||
m := msg.(*LoadBalanceResponse)
|
||||
// load_balance_response_type
|
||||
switch x := m.LoadBalanceResponseType.(type) {
|
||||
case *LoadBalanceResponse_InitialResponse:
|
||||
b.EncodeVarint(1<<3 | proto.WireBytes)
|
||||
if err := b.EncodeMessage(x.InitialResponse); err != nil {
|
||||
return err
|
||||
}
|
||||
case *LoadBalanceResponse_ServerList:
|
||||
b.EncodeVarint(2<<3 | proto.WireBytes)
|
||||
if err := b.EncodeMessage(x.ServerList); err != nil {
|
||||
return err
|
||||
}
|
||||
case nil:
|
||||
default:
|
||||
return fmt.Errorf("LoadBalanceResponse.LoadBalanceResponseType has unexpected type %T", x)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _LoadBalanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
|
||||
m := msg.(*LoadBalanceResponse)
|
||||
switch tag {
|
||||
case 1: // load_balance_response_type.initial_response
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(InitialLoadBalanceResponse)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.LoadBalanceResponseType = &LoadBalanceResponse_InitialResponse{msg}
|
||||
return true, err
|
||||
case 2: // load_balance_response_type.server_list
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(ServerList)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.LoadBalanceResponseType = &LoadBalanceResponse_ServerList{msg}
|
||||
return true, err
|
||||
default:
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
func _LoadBalanceResponse_OneofSizer(msg proto.Message) (n int) {
|
||||
m := msg.(*LoadBalanceResponse)
|
||||
// load_balance_response_type
|
||||
switch x := m.LoadBalanceResponseType.(type) {
|
||||
case *LoadBalanceResponse_InitialResponse:
|
||||
s := proto.Size(x.InitialResponse)
|
||||
n += proto.SizeVarint(1<<3 | proto.WireBytes)
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *LoadBalanceResponse_ServerList:
|
||||
s := proto.Size(x.ServerList)
|
||||
n += proto.SizeVarint(2<<3 | proto.WireBytes)
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case nil:
|
||||
default:
|
||||
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
type InitialLoadBalanceResponse struct {
|
||||
// This is an application layer redirect that indicates the client should use
|
||||
// the specified server for load balancing. When this field is non-empty in
|
||||
// the response, the client should open a separate connection to the
|
||||
// load_balancer_delegate and call the BalanceLoad method. Its length should
|
||||
// be less than 64 bytes.
|
||||
LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate" json:"load_balancer_delegate,omitempty"`
|
||||
// This interval defines how often the client should send the client stats
|
||||
// to the load balancer. Stats should only be reported when the duration is
|
||||
// positive.
|
||||
ClientStatsReportInterval *Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval" json:"client_stats_report_interval,omitempty"`
|
||||
}
|
||||
|
||||
func (m *InitialLoadBalanceResponse) Reset() { *m = InitialLoadBalanceResponse{} }
|
||||
func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*InitialLoadBalanceResponse) ProtoMessage() {}
|
||||
func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
|
||||
|
||||
func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string {
|
||||
if m != nil {
|
||||
return m.LoadBalancerDelegate
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *Duration {
|
||||
if m != nil {
|
||||
return m.ClientStatsReportInterval
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ServerList struct {
|
||||
// Contains a list of servers selected by the load balancer. The list will
|
||||
// be updated when server resolutions change or as needed to balance load
|
||||
// across more servers. The client should consume the server list in order
|
||||
// unless instructed otherwise via the client_config.
|
||||
Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"`
|
||||
// Indicates the amount of time that the client should consider this server
|
||||
// list as valid. It may be considered stale after waiting this interval of
|
||||
// time after receiving the list. If the interval is not positive, the
|
||||
// client can assume the list is valid until the next list is received.
|
||||
ExpirationInterval *Duration `protobuf:"bytes,3,opt,name=expiration_interval,json=expirationInterval" json:"expiration_interval,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ServerList) Reset() { *m = ServerList{} }
|
||||
func (m *ServerList) String() string { return proto.CompactTextString(m) }
|
||||
func (*ServerList) ProtoMessage() {}
|
||||
func (*ServerList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
|
||||
|
||||
func (m *ServerList) GetServers() []*Server {
|
||||
if m != nil {
|
||||
return m.Servers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ServerList) GetExpirationInterval() *Duration {
|
||||
if m != nil {
|
||||
return m.ExpirationInterval
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Contains server information. When none of the [drop_for_*] fields are true,
|
||||
// use the other fields. When drop_for_rate_limiting is true, ignore all other
|
||||
// fields. Use drop_for_load_balancing only when it is true and
|
||||
// drop_for_rate_limiting is false.
|
||||
type Server struct {
|
||||
// A resolved address for the server, serialized in network-byte-order. It may
|
||||
// either be an IPv4 or IPv6 address.
|
||||
IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
|
||||
// A resolved port number for the server.
|
||||
Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"`
|
||||
// An opaque but printable token given to the frontend for each pick. All
|
||||
// frontend requests for that pick must include the token in its initial
|
||||
// metadata. The token is used by the backend to verify the request and to
|
||||
// allow the backend to report load to the gRPC LB system.
|
||||
//
|
||||
// Its length is variable but less than 50 bytes.
|
||||
LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken" json:"load_balance_token,omitempty"`
|
||||
// Indicates whether this particular request should be dropped by the client
|
||||
// for rate limiting.
|
||||
DropForRateLimiting bool `protobuf:"varint,4,opt,name=drop_for_rate_limiting,json=dropForRateLimiting" json:"drop_for_rate_limiting,omitempty"`
|
||||
// Indicates whether this particular request should be dropped by the client
|
||||
// for load balancing.
|
||||
DropForLoadBalancing bool `protobuf:"varint,5,opt,name=drop_for_load_balancing,json=dropForLoadBalancing" json:"drop_for_load_balancing,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Server) Reset() { *m = Server{} }
|
||||
func (m *Server) String() string { return proto.CompactTextString(m) }
|
||||
func (*Server) ProtoMessage() {}
|
||||
func (*Server) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
|
||||
|
||||
func (m *Server) GetIpAddress() []byte {
|
||||
if m != nil {
|
||||
return m.IpAddress
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Server) GetPort() int32 {
|
||||
if m != nil {
|
||||
return m.Port
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Server) GetLoadBalanceToken() string {
|
||||
if m != nil {
|
||||
return m.LoadBalanceToken
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Server) GetDropForRateLimiting() bool {
|
||||
if m != nil {
|
||||
return m.DropForRateLimiting
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *Server) GetDropForLoadBalancing() bool {
|
||||
if m != nil {
|
||||
return m.DropForLoadBalancing
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Duration)(nil), "grpc.lb.v1.Duration")
|
||||
proto.RegisterType((*Timestamp)(nil), "grpc.lb.v1.Timestamp")
|
||||
proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest")
|
||||
proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest")
|
||||
proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats")
|
||||
proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse")
|
||||
proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse")
|
||||
proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList")
|
||||
proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("grpclb.proto", fileDescriptor0) }
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 733 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x39,
|
||||
0x14, 0x66, 0x36, 0xfc, 0xe5, 0x24, 0x5a, 0x58, 0x93, 0x85, 0xc0, 0xc2, 0x2e, 0x1b, 0xa9, 0x34,
|
||||
0xaa, 0x68, 0x68, 0x43, 0x7b, 0xd1, 0x9f, 0x9b, 0x02, 0x45, 0x41, 0xe5, 0xa2, 0x72, 0xa8, 0x7a,
|
||||
0x55, 0x59, 0x4e, 0xc6, 0x80, 0xc5, 0xc4, 0x9e, 0xda, 0x4e, 0x68, 0x2f, 0x7b, 0xd9, 0x47, 0xe9,
|
||||
0x63, 0x54, 0x7d, 0x86, 0xbe, 0x4f, 0x65, 0x7b, 0x26, 0x33, 0x90, 0x1f, 0xd4, 0xbb, 0xf1, 0xf1,
|
||||
0x77, 0xbe, 0xf3, 0xf9, 0xd8, 0xdf, 0x19, 0x28, 0x5f, 0xa8, 0xb8, 0x1b, 0x75, 0x1a, 0xb1, 0x92,
|
||||
0x46, 0x22, 0xb0, 0xab, 0x46, 0xd4, 0x69, 0x0c, 0x1e, 0xd7, 0x9e, 0xc3, 0xe2, 0x51, 0x5f, 0x51,
|
||||
0xc3, 0xa5, 0x40, 0x55, 0x58, 0xd0, 0xac, 0x2b, 0x45, 0xa8, 0xab, 0xc1, 0x76, 0x50, 0x2f, 0xe0,
|
||||
0x74, 0x89, 0x2a, 0x30, 0x27, 0xa8, 0x90, 0xba, 0xfa, 0xc7, 0x76, 0x50, 0x9f, 0xc3, 0x7e, 0x51,
|
||||
0x7b, 0x01, 0xc5, 0x33, 0xde, 0x63, 0xda, 0xd0, 0x5e, 0xfc, 0xdb, 0xc9, 0xdf, 0x03, 0x40, 0xa7,
|
||||
0x92, 0x86, 0x07, 0x34, 0xa2, 0xa2, 0xcb, 0x30, 0xfb, 0xd8, 0x67, 0xda, 0xa0, 0xb7, 0xb0, 0xc4,
|
||||
0x05, 0x37, 0x9c, 0x46, 0x44, 0xf9, 0x90, 0xa3, 0x2b, 0x35, 0xef, 0x35, 0x32, 0xd5, 0x8d, 0x13,
|
||||
0x0f, 0x19, 0xcd, 0x6f, 0xcd, 0xe0, 0x3f, 0x93, 0xfc, 0x94, 0xf1, 0x25, 0x94, 0xbb, 0x11, 0x67,
|
||||
0xc2, 0x10, 0x6d, 0xa8, 0xf1, 0x2a, 0x4a, 0xcd, 0xb5, 0x3c, 0xdd, 0xa1, 0xdb, 0x6f, 0xdb, 0xed,
|
||||
0xd6, 0x0c, 0x2e, 0x75, 0xb3, 0xe5, 0xc1, 0x3f, 0xb0, 0x1e, 0x49, 0x1a, 0x92, 0x8e, 0x2f, 0x93,
|
||||
0x8a, 0x22, 0xe6, 0x73, 0xcc, 0x6a, 0x7b, 0xb0, 0x3e, 0x51, 0x09, 0x42, 0x30, 0x2b, 0x68, 0x8f,
|
||||
0x39, 0xf9, 0x45, 0xec, 0xbe, 0x6b, 0x5f, 0x67, 0xa1, 0x94, 0x2b, 0x86, 0xf6, 0xa1, 0x68, 0xd2,
|
||||
0x0e, 0x26, 0xe7, 0xfc, 0x3b, 0x2f, 0x6c, 0xd8, 0x5e, 0x9c, 0xe1, 0xd0, 0x03, 0xf8, 0x4b, 0xf4,
|
||||
0x7b, 0xa4, 0x4b, 0xa3, 0x48, 0xdb, 0x33, 0x29, 0xc3, 0x42, 0x77, 0xaa, 0x02, 0x5e, 0x12, 0xfd,
|
||||
0xde, 0xa1, 0x8d, 0xb7, 0x7d, 0x18, 0xed, 0x02, 0xca, 0xb0, 0xe7, 0x5c, 0x70, 0x7d, 0xc9, 0xc2,
|
||||
0x6a, 0xc1, 0x81, 0x97, 0x53, 0xf0, 0x71, 0x12, 0x47, 0x04, 0x1a, 0xa3, 0x68, 0x72, 0xcd, 0xcd,
|
||||
0x25, 0x09, 0x95, 0x8c, 0xc9, 0xb9, 0x54, 0x44, 0x51, 0xc3, 0x48, 0xc4, 0x7b, 0xdc, 0x70, 0x71,
|
||||
0x51, 0x9d, 0x75, 0x4c, 0xf7, 0x6f, 0x33, 0xbd, 0xe7, 0xe6, 0xf2, 0x48, 0xc9, 0xf8, 0x58, 0x2a,
|
||||
0x4c, 0x0d, 0x3b, 0x4d, 0xe0, 0x88, 0xc2, 0xde, 0x9d, 0x05, 0x72, 0xed, 0xb6, 0x15, 0xe6, 0x5c,
|
||||
0x85, 0xfa, 0x94, 0x0a, 0x59, 0xef, 0x6d, 0x89, 0x0f, 0xf0, 0x70, 0x52, 0x89, 0xe4, 0x19, 0x9c,
|
||||
0x53, 0x1e, 0xb1, 0x90, 0x18, 0x49, 0x34, 0x13, 0x61, 0x75, 0xde, 0x15, 0xd8, 0x19, 0x57, 0xc0,
|
||||
0x5f, 0xd5, 0xb1, 0xc3, 0x9f, 0xc9, 0x36, 0x13, 0x21, 0x6a, 0xc1, 0xff, 0x63, 0xe8, 0xaf, 0x84,
|
||||
0xbc, 0x16, 0x44, 0xb1, 0x2e, 0xe3, 0x03, 0x16, 0x56, 0x17, 0x1c, 0xe5, 0xd6, 0x6d, 0xca, 0x37,
|
||||
0x16, 0x85, 0x13, 0x50, 0xed, 0x47, 0x00, 0x2b, 0x37, 0x9e, 0x8d, 0x8e, 0xa5, 0xd0, 0x0c, 0xb5,
|
||||
0x61, 0x39, 0x73, 0x80, 0x8f, 0x25, 0x4f, 0x63, 0xe7, 0x2e, 0x0b, 0x78, 0x74, 0x6b, 0x06, 0x2f,
|
||||
0x0d, 0x3d, 0x90, 0x90, 0x3e, 0x83, 0x92, 0x66, 0x6a, 0xc0, 0x14, 0x89, 0xb8, 0x36, 0x89, 0x07,
|
||||
0x56, 0xf3, 0x7c, 0x6d, 0xb7, 0x7d, 0xca, 0x9d, 0x87, 0x40, 0x0f, 0x57, 0x07, 0x9b, 0xb0, 0x71,
|
||||
0xcb, 0x01, 0x9e, 0xd3, 0x5b, 0xe0, 0x5b, 0x00, 0x1b, 0x93, 0xa5, 0xa0, 0x27, 0xb0, 0x9a, 0x4f,
|
||||
0x56, 0x24, 0x64, 0x11, 0xbb, 0xa0, 0x26, 0xb5, 0x45, 0x25, 0xca, 0x92, 0xd4, 0x51, 0xb2, 0x87,
|
||||
0xde, 0xc1, 0x66, 0xde, 0xb2, 0x44, 0xb1, 0x58, 0x2a, 0x43, 0xb8, 0x30, 0x4c, 0x0d, 0x68, 0x94,
|
||||
0xc8, 0xaf, 0xe4, 0xe5, 0xa7, 0x43, 0x0c, 0xaf, 0xe7, 0xdc, 0x8b, 0x5d, 0xde, 0x49, 0x92, 0x56,
|
||||
0xfb, 0x12, 0x00, 0x64, 0xc7, 0x44, 0xbb, 0x76, 0x62, 0xd9, 0x95, 0x9d, 0x58, 0x85, 0x7a, 0xa9,
|
||||
0x89, 0x46, 0xfb, 0x81, 0x53, 0x08, 0x7a, 0x0d, 0x2b, 0xec, 0x53, 0xcc, 0x7d, 0x95, 0x4c, 0x4a,
|
||||
0x61, 0x8a, 0x14, 0x94, 0x25, 0x0c, 0x35, 0xfc, 0x0c, 0x60, 0xde, 0x53, 0xa3, 0x2d, 0x00, 0x1e,
|
||||
0x13, 0x1a, 0x86, 0x8a, 0x69, 0x3f, 0x34, 0xcb, 0xb8, 0xc8, 0xe3, 0x57, 0x3e, 0x60, 0xe7, 0x87,
|
||||
0x55, 0x9f, 0x4c, 0x4d, 0xf7, 0x6d, 0xed, 0x7c, 0xe3, 0x2e, 0x8c, 0xbc, 0x62, 0xc2, 0x69, 0x28,
|
||||
0xe2, 0xe5, 0x5c, 0x2b, 0xcf, 0x6c, 0x1c, 0xed, 0xc3, 0xea, 0x14, 0xdb, 0x2e, 0xe2, 0x95, 0x70,
|
||||
0x8c, 0x45, 0x9f, 0xc2, 0xda, 0x34, 0x2b, 0x2e, 0xe2, 0x4a, 0x38, 0xc6, 0x76, 0xcd, 0x0e, 0x94,
|
||||
0x73, 0xf7, 0xaf, 0x10, 0x86, 0x52, 0xf2, 0x6d, 0xc3, 0xe8, 0xdf, 0x7c, 0x83, 0x46, 0x87, 0xe5,
|
||||
0xc6, 0x7f, 0x13, 0xf7, 0xfd, 0x43, 0xaa, 0x07, 0x8f, 0x82, 0xce, 0xbc, 0xfb, 0x7d, 0xed, 0xff,
|
||||
0x0a, 0x00, 0x00, 0xff, 0xff, 0x64, 0xbf, 0xda, 0x5e, 0xce, 0x06, 0x00, 0x00,
|
||||
}
|
|
@@ -1,164 +0,0 @@
|
|||
// Copyright 2016 gRPC authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package grpc.lb.v1;
|
||||
|
||||
message Duration {
|
||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||
// to +315,576,000,000 inclusive.
|
||||
int64 seconds = 1;
|
||||
|
||||
// Signed fractions of a second at nanosecond resolution of the span
|
||||
// of time. Durations less than one second are represented with a 0
|
||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
||||
// of one second or more, a non-zero value for the `nanos` field must be
|
||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
||||
// to +999,999,999 inclusive.
|
||||
int32 nanos = 2;
|
||||
}
|
||||
|
||||
message Timestamp {
|
||||
|
||||
// Represents seconds of UTC time since Unix epoch
|
||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||
// 9999-12-31T23:59:59Z inclusive.
|
||||
int64 seconds = 1;
|
||||
|
||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||
// second values with fractions must still have non-negative nanos values
|
||||
// that count forward in time. Must be from 0 to 999,999,999
|
||||
// inclusive.
|
||||
int32 nanos = 2;
|
||||
}
|
||||
|
||||
service LoadBalancer {
|
||||
// Bidirectional rpc to get a list of servers.
|
||||
rpc BalanceLoad(stream LoadBalanceRequest)
|
||||
returns (stream LoadBalanceResponse);
|
||||
}
|
||||
|
||||
message LoadBalanceRequest {
|
||||
oneof load_balance_request_type {
|
||||
// This message should be sent on the first request to the load balancer.
|
||||
InitialLoadBalanceRequest initial_request = 1;
|
||||
|
||||
// The client stats should be periodically reported to the load balancer
|
||||
// based on the duration defined in the InitialLoadBalanceResponse.
|
||||
ClientStats client_stats = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message InitialLoadBalanceRequest {
|
||||
// Name of load balanced service (IE, balancer.service.com)
|
||||
// length should be less than 256 bytes.
|
||||
string name = 1;
|
||||
}
|
||||
|
||||
// Contains client level statistics that are useful to load balancing. Each
|
||||
// count except the timestamp should be reset to zero after reporting the stats.
|
||||
message ClientStats {
|
||||
// The timestamp of generating the report.
|
||||
Timestamp timestamp = 1;
|
||||
|
||||
// The total number of RPCs that started.
|
||||
int64 num_calls_started = 2;
|
||||
|
||||
// The total number of RPCs that finished.
|
||||
int64 num_calls_finished = 3;
|
||||
|
||||
// The total number of RPCs that were dropped by the client because of rate
|
||||
// limiting.
|
||||
int64 num_calls_finished_with_drop_for_rate_limiting = 4;
|
||||
|
||||
// The total number of RPCs that were dropped by the client because of load
|
||||
// balancing.
|
||||
int64 num_calls_finished_with_drop_for_load_balancing = 5;
|
||||
|
||||
// The total number of RPCs that failed to reach a server except dropped RPCs.
|
||||
int64 num_calls_finished_with_client_failed_to_send = 6;
|
||||
|
||||
// The total number of RPCs that finished and are known to have been received
|
||||
// by a server.
|
||||
int64 num_calls_finished_known_received = 7;
|
||||
}
|
||||
|
||||
message LoadBalanceResponse {
|
||||
oneof load_balance_response_type {
|
||||
// This message should be sent on the first response to the client.
|
||||
InitialLoadBalanceResponse initial_response = 1;
|
||||
|
||||
// Contains the list of servers selected by the load balancer. The client
|
||||
// should send requests to these servers in the specified order.
|
||||
ServerList server_list = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message InitialLoadBalanceResponse {
|
||||
// This is an application layer redirect that indicates the client should use
|
||||
// the specified server for load balancing. When this field is non-empty in
|
||||
// the response, the client should open a separate connection to the
|
||||
// load_balancer_delegate and call the BalanceLoad method. Its length should
|
||||
// be less than 64 bytes.
|
||||
string load_balancer_delegate = 1;
|
||||
|
||||
// This interval defines how often the client should send the client stats
|
||||
// to the load balancer. Stats should only be reported when the duration is
|
||||
// positive.
|
||||
Duration client_stats_report_interval = 2;
|
||||
}
|
||||
|
||||
message ServerList {
|
||||
// Contains a list of servers selected by the load balancer. The list will
|
||||
// be updated when server resolutions change or as needed to balance load
|
||||
// across more servers. The client should consume the server list in order
|
||||
// unless instructed otherwise via the client_config.
|
||||
repeated Server servers = 1;
|
||||
|
||||
// Indicates the amount of time that the client should consider this server
|
||||
// list as valid. It may be considered stale after waiting this interval of
|
||||
// time after receiving the list. If the interval is not positive, the
|
||||
// client can assume the list is valid until the next list is received.
|
||||
Duration expiration_interval = 3;
|
||||
}
|
||||
|
||||
// Contains server information. When none of the [drop_for_*] fields are true,
|
||||
// use the other fields. When drop_for_rate_limiting is true, ignore all other
|
||||
// fields. Use drop_for_load_balancing only when it is true and
|
||||
// drop_for_rate_limiting is false.
|
||||
message Server {
|
||||
// A resolved address for the server, serialized in network-byte-order. It may
|
||||
// either be an IPv4 or IPv6 address.
|
||||
bytes ip_address = 1;
|
||||
|
||||
// A resolved port number for the server.
|
||||
int32 port = 2;
|
||||
|
||||
// An opaque but printable token given to the frontend for each pick. All
|
||||
// frontend requests for that pick must include the token in its initial
|
||||
// metadata. The token is used by the backend to verify the request and to
|
||||
// allow the backend to report load to the gRPC LB system.
|
||||
//
|
||||
// Its length is variable but less than 50 bytes.
|
||||
string load_balance_token = 3;
|
||||
|
||||
// Indicates whether this particular request should be dropped by the client
|
||||
// for rate limiting.
|
||||
bool drop_for_rate_limiting = 4;
|
||||
|
||||
// Indicates whether this particular request should be dropped by the client
|
||||
// for load balancing.
|
||||
bool drop_for_load_balancing = 5;
|
||||
}
|
|
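The Duration and Timestamp messages above mirror the semantics of the well-known protobuf Duration and Timestamp types, so a client typically converts them straight into Go time values. A minimal sketch of that conversion; the helper names and literal values are ours, not part of this diff:

package main

import (
    "fmt"
    "time"
)

// durationFromProto converts a Duration message's fields into a Go time.Duration.
// Both fields carry the same sign for spans of one second or more.
func durationFromProto(seconds int64, nanos int32) time.Duration {
    return time.Duration(seconds)*time.Second + time.Duration(nanos)*time.Nanosecond
}

// timeFromProto converts a Timestamp message's fields into a Go time.Time.
// nanos is always non-negative and counts forward from the whole second.
func timeFromProto(seconds int64, nanos int32) time.Time {
    return time.Unix(seconds, int64(nanos)).UTC()
}

func main() {
    fmt.Println(durationFromProto(1, 500000000)) // 1.5s
    fmt.Println(timeFromProto(0, 0))             // 1970-01-01 00:00:00 +0000 UTC
}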
@@ -105,18 +105,21 @@ func Fatalln(args ...interface{}) {
}

// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
//
// Deprecated: use Info.
func Print(args ...interface{}) {
logger.Info(args...)
}

// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
//
// Deprecated: use Infof.
func Printf(format string, args ...interface{}) {
logger.Infof(format, args...)
}

// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
//
// Deprecated: use Infoln.
func Println(args ...interface{}) {
logger.Infoln(args...)
@@ -19,6 +19,7 @@
package grpclog

// Logger mimics golang's standard Logger as an interface.
//
// Deprecated: use LoggerV2.
type Logger interface {
Fatal(args ...interface{})
@@ -31,6 +32,7 @@ type Logger interface {

// SetLogger sets the logger that is used in grpc. Call only from
// init() functions.
//
// Deprecated: use SetLoggerV2.
func SetLogger(l Logger) {
logger = &loggerWrapper{Logger: l}
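Since Logger and SetLogger are deprecated in favor of LoggerV2, new code wires up logging through SetLoggerV2 instead. A minimal sketch; the writer choices are ours:

package main

import (
    "os"

    "google.golang.org/grpc/grpclog"
)

func init() {
    // Send info logs to stdout and warning/error logs to stderr.
    // Like SetLogger above, this should only be called from init().
    grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stdout, os.Stderr, os.Stderr))
}

func main() {
    grpclog.Info("logger configured") // replaces the deprecated grpclog.Print
}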
@@ -1,18 +1,7 @@
// Code generated by protoc-gen-go.
// source: health.proto
// DO NOT EDIT!
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: grpc/health/v1/health.proto

/*
Package grpc_health_v1 is a generated protocol buffer package.

It is generated from these files:
health.proto

It has these top-level messages:
HealthCheckRequest
HealthCheckResponse
*/
package grpc_health_v1
package grpc_health_v1 // import "google.golang.org/grpc/health/grpc_health_v1"

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
@@ -57,26 +46,84 @@ func (x HealthCheckResponse_ServingStatus) String() string {
return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x))
}
func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
return fileDescriptor0, []int{1, 0}
return fileDescriptor_health_85731b6c49265086, []int{1, 0}
}

type HealthCheckRequest struct {
Service string `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"`
Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}

func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} }
func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) }
func (*HealthCheckRequest) ProtoMessage() {}
func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_health_85731b6c49265086, []int{0}
}
func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b)
}
func (m *HealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HealthCheckRequest.Marshal(b, m, deterministic)
}
func (dst *HealthCheckRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_HealthCheckRequest.Merge(dst, src)
}
func (m *HealthCheckRequest) XXX_Size() int {
return xxx_messageInfo_HealthCheckRequest.Size(m)
}
func (m *HealthCheckRequest) XXX_DiscardUnknown() {
xxx_messageInfo_HealthCheckRequest.DiscardUnknown(m)
}

var xxx_messageInfo_HealthCheckRequest proto.InternalMessageInfo

func (m *HealthCheckRequest) GetService() string {
if m != nil {
return m.Service
}
return ""
}

type HealthCheckResponse struct {
Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}

func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} }
func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) }
func (*HealthCheckResponse) ProtoMessage() {}
func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_health_85731b6c49265086, []int{1}
}
func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b)
}
func (m *HealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HealthCheckResponse.Marshal(b, m, deterministic)
}
func (dst *HealthCheckResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_HealthCheckResponse.Merge(dst, src)
}
func (m *HealthCheckResponse) XXX_Size() int {
return xxx_messageInfo_HealthCheckResponse.Size(m)
}
func (m *HealthCheckResponse) XXX_DiscardUnknown() {
xxx_messageInfo_HealthCheckResponse.DiscardUnknown(m)
}

var xxx_messageInfo_HealthCheckResponse proto.InternalMessageInfo

func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
if m != nil {
return m.Status
}
return HealthCheckResponse_UNKNOWN
}

func init() {
proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest")
@@ -92,8 +139,9 @@ var _ grpc.ClientConn
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4

// Client API for Health service

// HealthClient is the client API for Health service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type HealthClient interface {
Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
}
@@ -108,15 +156,14 @@ func NewHealthClient(cc *grpc.ClientConn) HealthClient {

func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
out := new(HealthCheckResponse)
err := grpc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}

// Server API for Health service

// HealthServer is the server API for Health service.
type HealthServer interface {
Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
}
@@ -153,24 +200,28 @@ var _Health_serviceDesc = grpc.ServiceDesc{
},
},
Streams: []grpc.StreamDesc{},
Metadata: "health.proto",
Metadata: "grpc/health/v1/health.proto",
}

func init() { proto.RegisterFile("health.proto", fileDescriptor0) }
func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_health_85731b6c49265086) }

var fileDescriptor0 = []byte{
// 204 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xc9, 0x48, 0x4d, 0xcc,
0x29, 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4b, 0x2f, 0x2a, 0x48, 0xd6, 0x83,
0x0a, 0x95, 0x19, 0x2a, 0xe9, 0x71, 0x09, 0x79, 0x80, 0x39, 0xce, 0x19, 0xa9, 0xc9, 0xd9, 0x41,
0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0x45, 0x65, 0x99, 0xc9,
0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x30, 0xae, 0xd2, 0x1c, 0x46, 0x2e, 0x61, 0x14,
0x0d, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x42, 0x9e, 0x5c, 0x6c, 0xc5, 0x25, 0x89, 0x25, 0xa5,
0xc5, 0x60, 0x0d, 0x7c, 0x46, 0x86, 0x7a, 0xa8, 0x16, 0xe9, 0x61, 0xd1, 0xa4, 0x17, 0x0c, 0x32,
0x34, 0x2f, 0x3d, 0x18, 0xac, 0x31, 0x08, 0x6a, 0x80, 0x92, 0x15, 0x17, 0x2f, 0x8a, 0x84, 0x10,
0x37, 0x17, 0x7b, 0xa8, 0x9f, 0xb7, 0x9f, 0x7f, 0xb8, 0x9f, 0x00, 0x03, 0x88, 0x13, 0xec, 0x1a,
0x14, 0xe6, 0xe9, 0xe7, 0x2e, 0xc0, 0x28, 0xc4, 0xcf, 0xc5, 0xed, 0xe7, 0x1f, 0x12, 0x0f, 0x13,
0x60, 0x32, 0x8a, 0xe2, 0x62, 0x83, 0x58, 0x24, 0x14, 0xc0, 0xc5, 0x0a, 0xb6, 0x4c, 0x48, 0x09,
0xaf, 0x4b, 0xc0, 0xfe, 0x95, 0x52, 0x26, 0xc2, 0xb5, 0x49, 0x6c, 0xe0, 0x10, 0x34, 0x06, 0x04,
0x00, 0x00, 0xff, 0xff, 0xac, 0x56, 0x2a, 0xcb, 0x51, 0x01, 0x00, 0x00,
var fileDescriptor_health_85731b6c49265086 = []byte{
// 271 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48,
0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2,
0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f,
0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82,
0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08,
0xc6, 0x55, 0x9a, 0xc3, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8,
0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5,
0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d,
0x50, 0xb2, 0xe2, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f,
0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8,
0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x46, 0x51, 0x5c, 0x6c, 0x10, 0x8b, 0x84,
0x02, 0xb8, 0x58, 0xc1, 0x96, 0x09, 0x29, 0xe1, 0x75, 0x09, 0xd8, 0xbf, 0x52, 0xca, 0x44, 0xb8,
0xd6, 0x29, 0x91, 0x4b, 0x30, 0x33, 0x1f, 0x4d, 0xa1, 0x13, 0x37, 0x44, 0x65, 0x00, 0x28, 0x70,
0x03, 0x18, 0xa3, 0x74, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0xd2, 0xf3, 0x73, 0x12, 0xf3,
0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0x91, 0x63, 0x03, 0xc4, 0x8e, 0x87, 0xb0, 0xe3, 0xcb, 0x0c,
0x57, 0x31, 0xf1, 0xb9, 0x83, 0x4c, 0x83, 0x18, 0xa1, 0x17, 0x66, 0x98, 0xc4, 0x06, 0x8e, 0x24,
0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xec, 0x66, 0x81, 0xcb, 0xc3, 0x01, 0x00, 0x00,
}
@@ -1,34 +0,0 @@
// Copyright 2017 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package grpc.health.v1;

message HealthCheckRequest {
string service = 1;
}

message HealthCheckResponse {
enum ServingStatus {
UNKNOWN = 0;
SERVING = 1;
NOT_SERVING = 2;
}
ServingStatus status = 1;
}

service Health{
rpc Check(HealthCheckRequest) returns (HealthCheckResponse);
}
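On the wire, the service above is a single unary Check RPC. A minimal client-side probe using the generated grpc_health_v1 package; the address and timeout are placeholders:

package main

import (
    "log"
    "time"

    "golang.org/x/net/context"
    "google.golang.org/grpc"
    healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
    conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
    if err != nil {
        log.Fatalf("dial: %v", err)
    }
    defer conn.Close()

    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()

    // An empty Service name asks about the overall health of the server.
    resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{Service: ""})
    if err != nil {
        log.Fatalf("health check failed: %v", err)
    }
    log.Printf("serving status: %v", resp.Status)
}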
@@ -16,6 +16,8 @@
*
*/

//go:generate ./regenerate.sh

// Package health provides some utility functions to health-check a server. The implementation
// is based on protobuf. Users need to write their own implementations if other IDLs are used.
package health
@@ -24,9 +26,9 @@ import (
"sync"

"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/status"
)

// Server implements `service Health`.
@@ -58,7 +60,7 @@ func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*h
Status: status,
}, nil
}
return nil, grpc.Errorf(codes.NotFound, "unknown service")
return nil, status.Error(codes.NotFound, "unknown service")
}

// SetServingStatus is called when the serving status of a service needs to be reset
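A minimal server-side wiring of this package, registering the health service and marking one hypothetical service name as serving; the listen address and service name are placeholders:

package main

import (
    "log"
    "net"

    "google.golang.org/grpc"
    "google.golang.org/grpc/health"
    healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
    lis, err := net.Listen("tcp", ":50051")
    if err != nil {
        log.Fatalf("listen: %v", err)
    }

    s := grpc.NewServer()
    hs := health.NewServer()
    healthpb.RegisterHealthServer(s, hs)

    // Requests for service names that were never set return codes.NotFound,
    // as in the Check method above.
    hs.SetServingStatus("my.package.MyService", healthpb.HealthCheckResponse_SERVING)

    if err := s.Serve(lis); err != nil {
        log.Fatalf("serve: %v", err)
    }
}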
@@ -0,0 +1,33 @@
#!/bin/bash
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -eux -o pipefail

TMP=$(mktemp -d)

function finish {
rm -rf "$TMP"
}
trap finish EXIT

pushd "$TMP"
mkdir -p grpc/health/v1
curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto > grpc/health/v1/health.proto

protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/health/v1/*.proto
popd
rm -f grpc_health_v1/*.pb.go
cp "$TMP"/grpc/health/v1/*.pb.go grpc_health_v1/
@@ -0,0 +1,6 @@
#!/bin/bash

TMP=$(mktemp -d /tmp/sdk.XXX) \
&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.64.zip" \
&& unzip -q $TMP.zip -d $TMP \
&& export PATH="$PATH:$TMP/go_appengine"
@@ -48,7 +48,9 @@ type UnaryServerInfo struct {
}

// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
// execution of a unary RPC.
// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the
// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as
// the status message of the RPC.
type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)

// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
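To illustrate the comment above, a sketch of a unary server interceptor that fails requests with a status-package error so the code and message survive on the wire; the metadata key and interceptor name are our choices:

package main

import (
    "golang.org/x/net/context"

    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/metadata"
    "google.golang.org/grpc/status"
)

// authUnaryInterceptor rejects requests that arrive without an "authorization" metadata key.
func authUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
    md, ok := metadata.FromIncomingContext(ctx)
    if !ok || len(md["authorization"]) == 0 {
        // A status error keeps codes.Unauthenticated; a plain error would surface as codes.Unknown.
        return nil, status.Error(codes.Unauthenticated, "missing authorization metadata")
    }
    return handler(ctx, req)
}

func main() {
    _ = grpc.NewServer(grpc.UnaryInterceptor(authUnaryInterceptor))
}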
@@ -0,0 +1,78 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/

// Package backoff implements the backoff strategy for gRPC.
//
// This is kept in internal until the gRPC project decides whether or not to
// allow alternative backoff strategies.
package backoff

import (
"time"

"google.golang.org/grpc/internal/grpcrand"
)

// Strategy defines the methodology for backing off after a grpc connection
// failure.
//
type Strategy interface {
// Backoff returns the amount of time to wait before the next retry given
// the number of consecutive failures.
Backoff(retries int) time.Duration
}

const (
// baseDelay is the amount of time to wait before retrying after the first
// failure.
baseDelay = 1.0 * time.Second
// factor is applied to the backoff after each retry.
factor = 1.6
// jitter provides a range to randomize backoff delays.
jitter = 0.2
)

// Exponential implements the exponential backoff algorithm as defined in
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
type Exponential struct {
// MaxDelay is the upper bound of backoff delay.
MaxDelay time.Duration
}

// Backoff returns the amount of time to wait before the next retry given the
// number of retries.
func (bc Exponential) Backoff(retries int) time.Duration {
if retries == 0 {
return baseDelay
}
backoff, max := float64(baseDelay), float64(bc.MaxDelay)
for backoff < max && retries > 0 {
backoff *= factor
retries--
}
if backoff > max {
backoff = max
}
// Randomize backoff delays so that if a cluster of requests start at
// the same time, they won't operate in lockstep.
backoff *= 1 + jitter*(grpcrand.Float64()*2-1)
if backoff < 0 {
return 0
}
return time.Duration(backoff)
}
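With the constants above, the pre-jitter delays grow as 1s, 1.6s, 2.56s, 4.096s, and so on, then each delay is randomized by up to plus or minus 20% and capped at MaxDelay. A hypothetical example test in this package could exercise that directly; the file and function names are our own:

package backoff

import (
    "fmt"
    "time"
)

// ExampleExponential_Backoff sketches what a backoff_example_test.go could look like.
func ExampleExponential_Backoff() {
    bs := Exponential{MaxDelay: 120 * time.Second}
    for retries := 0; retries < 4; retries++ {
        // Prints roughly 1s, 1.6s, 2.56s, 4.096s, each with up to 20% jitter.
        fmt.Println(bs.Backoff(retries))
    }
}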
@@ -0,0 +1,573 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package channelz defines APIs for enabling channelz service, entry
|
||||
// registration/deletion, and accessing channelz data. It also defines channelz
|
||||
// metric struct formats.
|
||||
//
|
||||
// All APIs in this package are experimental.
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var (
|
||||
db dbWrapper
|
||||
idGen idGenerator
|
||||
// EntryPerPage defines the number of channelz entries to be shown on a web page.
|
||||
EntryPerPage = 50
|
||||
curState int32
|
||||
)
|
||||
|
||||
// TurnOn turns on channelz data collection.
|
||||
func TurnOn() {
|
||||
if !IsOn() {
|
||||
NewChannelzStorage()
|
||||
atomic.StoreInt32(&curState, 1)
|
||||
}
|
||||
}
|
||||
|
||||
// IsOn returns whether channelz data collection is on.
|
||||
func IsOn() bool {
|
||||
return atomic.CompareAndSwapInt32(&curState, 1, 1)
|
||||
}
|
||||
|
||||
// dbWrapper wraps around a reference to internal channelz data storage, and
// provides synchronized functionality to set and get the reference.
|
||||
type dbWrapper struct {
|
||||
mu sync.RWMutex
|
||||
DB *channelMap
|
||||
}
|
||||
|
||||
func (d *dbWrapper) set(db *channelMap) {
|
||||
d.mu.Lock()
|
||||
d.DB = db
|
||||
d.mu.Unlock()
|
||||
}
|
||||
|
||||
func (d *dbWrapper) get() *channelMap {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
return d.DB
|
||||
}
|
||||
|
||||
// NewChannelzStorage initializes channelz data storage and id generator.
|
||||
//
|
||||
// Note: This function is exported for testing purpose only. User should not call
|
||||
// it in most cases.
|
||||
func NewChannelzStorage() {
|
||||
db.set(&channelMap{
|
||||
topLevelChannels: make(map[int64]struct{}),
|
||||
channels: make(map[int64]*channel),
|
||||
listenSockets: make(map[int64]*listenSocket),
|
||||
normalSockets: make(map[int64]*normalSocket),
|
||||
servers: make(map[int64]*server),
|
||||
subChannels: make(map[int64]*subChannel),
|
||||
})
|
||||
idGen.reset()
|
||||
}
|
||||
|
||||
// GetTopChannels returns a slice of top channel's ChannelMetric, along with a
|
||||
// boolean indicating whether there's more top channels to be queried for.
|
||||
//
|
||||
// The arg id specifies that only top channel with id at or above it will be included
|
||||
// in the result. The returned slice is up to a length of EntryPerPage, and is
|
||||
// sorted in ascending id order.
|
||||
func GetTopChannels(id int64) ([]*ChannelMetric, bool) {
|
||||
return db.get().GetTopChannels(id)
|
||||
}
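// Illustrative only (not part of this file): a caller can page through every
// top-level channel by restarting from the last id it has seen plus one, until
// the boolean above reports that the end has been reached.
//
//	func allTopChannels() []*ChannelMetric {
//		var all []*ChannelMetric
//		id := int64(0)
//		for {
//			page, end := GetTopChannels(id)
//			all = append(all, page...)
//			if end || len(page) == 0 {
//				return all
//			}
//			id = page[len(page)-1].ID + 1
//		}
//	}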
|
||||
|
||||
// GetServers returns a slice of server's ServerMetric, along with a
|
||||
// boolean indicating whether there's more servers to be queried for.
|
||||
//
|
||||
// The arg id specifies that only server with id at or above it will be included
|
||||
// in the result. The returned slice is up to a length of EntryPerPage, and is
|
||||
// sorted in ascending id order.
|
||||
func GetServers(id int64) ([]*ServerMetric, bool) {
|
||||
return db.get().GetServers(id)
|
||||
}
|
||||
|
||||
// GetServerSockets returns a slice of server's (identified by id) normal socket's
|
||||
// SocketMetric, along with a boolean indicating whether there's more sockets to
|
||||
// be queried for.
|
||||
//
|
||||
// The arg startID specifies that only sockets with id at or above it will be
|
||||
// included in the result. The returned slice is up to a length of EntryPerPage,
|
||||
// and is sorted in ascending id order.
|
||||
func GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
|
||||
return db.get().GetServerSockets(id, startID)
|
||||
}
|
||||
|
||||
// GetChannel returns the ChannelMetric for the channel (identified by id).
|
||||
func GetChannel(id int64) *ChannelMetric {
|
||||
return db.get().GetChannel(id)
|
||||
}
|
||||
|
||||
// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
|
||||
func GetSubChannel(id int64) *SubChannelMetric {
|
||||
return db.get().GetSubChannel(id)
|
||||
}
|
||||
|
||||
// GetSocket returns the SocketInternalMetric for the socket (identified by id).
|
||||
func GetSocket(id int64) *SocketMetric {
|
||||
return db.get().GetSocket(id)
|
||||
}
|
||||
|
||||
// RegisterChannel registers the given channel c in channelz database with ref
|
||||
// as its reference name, and add it to the child list of its parent (identified
|
||||
// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
|
||||
// assigned to this channel.
|
||||
func RegisterChannel(c Channel, pid int64, ref string) int64 {
|
||||
id := idGen.genID()
|
||||
cn := &channel{
|
||||
refName: ref,
|
||||
c: c,
|
||||
subChans: make(map[int64]string),
|
||||
nestedChans: make(map[int64]string),
|
||||
id: id,
|
||||
pid: pid,
|
||||
}
|
||||
if pid == 0 {
|
||||
db.get().addChannel(id, cn, true, pid, ref)
|
||||
} else {
|
||||
db.get().addChannel(id, cn, false, pid, ref)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// RegisterSubChannel registers the given channel c in channelz database with ref
|
||||
// as its reference name, and add it to the child list of its parent (identified
|
||||
// by pid). It returns the unique channelz tracking id assigned to this subchannel.
|
||||
func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
|
||||
if pid == 0 {
|
||||
grpclog.Error("a SubChannel's parent id cannot be 0")
|
||||
return 0
|
||||
}
|
||||
id := idGen.genID()
|
||||
sc := &subChannel{
|
||||
refName: ref,
|
||||
c: c,
|
||||
sockets: make(map[int64]string),
|
||||
id: id,
|
||||
pid: pid,
|
||||
}
|
||||
db.get().addSubChannel(id, sc, pid, ref)
|
||||
return id
|
||||
}
|
||||
|
||||
// RegisterServer registers the given server s in channelz database. It returns
|
||||
// the unique channelz tracking id assigned to this server.
|
||||
func RegisterServer(s Server, ref string) int64 {
|
||||
id := idGen.genID()
|
||||
svr := &server{
|
||||
refName: ref,
|
||||
s: s,
|
||||
sockets: make(map[int64]string),
|
||||
listenSockets: make(map[int64]string),
|
||||
id: id,
|
||||
}
|
||||
db.get().addServer(id, svr)
|
||||
return id
|
||||
}
|
||||
|
||||
// RegisterListenSocket registers the given listen socket s in channelz database
|
||||
// with ref as its reference name, and add it to the child list of its parent
|
||||
// (identified by pid). It returns the unique channelz tracking id assigned to
|
||||
// this listen socket.
|
||||
func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
|
||||
if pid == 0 {
|
||||
grpclog.Error("a ListenSocket's parent id cannot be 0")
|
||||
return 0
|
||||
}
|
||||
id := idGen.genID()
|
||||
ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
|
||||
db.get().addListenSocket(id, ls, pid, ref)
|
||||
return id
|
||||
}
|
||||
|
||||
// RegisterNormalSocket registers the given normal socket s in channelz database
|
||||
// with ref as its reference name, and add it to the child list of its parent
|
||||
// (identified by pid). It returns the unique channelz tracking id assigned to
|
||||
// this normal socket.
|
||||
func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
|
||||
if pid == 0 {
|
||||
grpclog.Error("a NormalSocket's parent id cannot be 0")
|
||||
return 0
|
||||
}
|
||||
id := idGen.genID()
|
||||
ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
|
||||
db.get().addNormalSocket(id, ns, pid, ref)
|
||||
return id
|
||||
}
|
||||
|
||||
// RemoveEntry removes the entry with the given unique channelz tracking id from the
// channelz database.
|
||||
func RemoveEntry(id int64) {
|
||||
db.get().removeEntry(id)
|
||||
}
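// Illustrative only (not part of this file): the Register* calls above nest
// entities through the pid argument, and RemoveEntry is called as each entity
// shuts down; per removeEntry below, a parent entry is only deleted once its
// children are gone. A sketch for a server with one listen socket, where mySvr
// and myLis are hypothetical implementors of Server and Socket:
//
//	srvID := RegisterServer(mySvr, "myServer")
//	lisID := RegisterListenSocket(myLis, srvID, "127.0.0.1:8080")
//	...
//	RemoveEntry(lisID)
//	RemoveEntry(srvID)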
|
||||
|
||||
// channelMap is the storage data structure for channelz.
|
||||
// Methods of channelMap can be divided into two categories with respect to locking.
// 1. Methods that acquire the global lock.
// 2. Methods that can only be called while the global lock is held.
// Methods of the second kind must always be called from within a method of the first kind.
|
||||
type channelMap struct {
|
||||
mu sync.RWMutex
|
||||
topLevelChannels map[int64]struct{}
|
||||
servers map[int64]*server
|
||||
channels map[int64]*channel
|
||||
subChannels map[int64]*subChannel
|
||||
listenSockets map[int64]*listenSocket
|
||||
normalSockets map[int64]*normalSocket
|
||||
}
|
||||
|
||||
func (c *channelMap) addServer(id int64, s *server) {
|
||||
c.mu.Lock()
|
||||
s.cm = c
|
||||
c.servers[id] = s
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
|
||||
c.mu.Lock()
|
||||
cn.cm = c
|
||||
c.channels[id] = cn
|
||||
if isTopChannel {
|
||||
c.topLevelChannels[id] = struct{}{}
|
||||
} else {
|
||||
c.findEntry(pid).addChild(id, cn)
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
|
||||
c.mu.Lock()
|
||||
sc.cm = c
|
||||
c.subChannels[id] = sc
|
||||
c.findEntry(pid).addChild(id, sc)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
|
||||
c.mu.Lock()
|
||||
ls.cm = c
|
||||
c.listenSockets[id] = ls
|
||||
c.findEntry(pid).addChild(id, ls)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
|
||||
c.mu.Lock()
|
||||
ns.cm = c
|
||||
c.normalSockets[id] = ns
|
||||
c.findEntry(pid).addChild(id, ns)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// removeEntry triggers the removal of an entry, which may not indeed delete the
|
||||
// entry, if it has to wait on the deletion of its children, or may lead to a chain
|
||||
// of entry deletion. For example, deleting the last socket of a gracefully shutting
|
||||
// down server will lead to the server being also deleted.
|
||||
func (c *channelMap) removeEntry(id int64) {
|
||||
c.mu.Lock()
|
||||
c.findEntry(id).triggerDelete()
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// c.mu must be held by the caller.
|
||||
func (c *channelMap) findEntry(id int64) entry {
|
||||
var v entry
|
||||
var ok bool
|
||||
if v, ok = c.channels[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok = c.subChannels[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok = c.servers[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok = c.listenSockets[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok = c.normalSockets[id]; ok {
|
||||
return v
|
||||
}
|
||||
return &dummyEntry{idNotFound: id}
|
||||
}
|
||||
|
||||
// c.mu must be held by the caller
|
||||
// deleteEntry simply deletes an entry from the channelMap. Before calling this
|
||||
// method, caller must check this entry is ready to be deleted, i.e removeEntry()
|
||||
// has been called on it, and no children still exist.
|
||||
// Conditionals are ordered by the expected frequency of deletion of each entity
|
||||
// type, in order to optimize performance.
|
||||
func (c *channelMap) deleteEntry(id int64) {
|
||||
var ok bool
|
||||
if _, ok = c.normalSockets[id]; ok {
|
||||
delete(c.normalSockets, id)
|
||||
return
|
||||
}
|
||||
if _, ok = c.subChannels[id]; ok {
|
||||
delete(c.subChannels, id)
|
||||
return
|
||||
}
|
||||
if _, ok = c.channels[id]; ok {
|
||||
delete(c.channels, id)
|
||||
delete(c.topLevelChannels, id)
|
||||
return
|
||||
}
|
||||
if _, ok = c.listenSockets[id]; ok {
|
||||
delete(c.listenSockets, id)
|
||||
return
|
||||
}
|
||||
if _, ok = c.servers[id]; ok {
|
||||
delete(c.servers, id)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
type int64Slice []int64
|
||||
|
||||
func (s int64Slice) Len() int { return len(s) }
|
||||
func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
|
||||
|
||||
func copyMap(m map[int64]string) map[int64]string {
|
||||
n := make(map[int64]string)
|
||||
for k, v := range m {
|
||||
n[k] = v
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) {
|
||||
c.mu.RLock()
|
||||
l := len(c.topLevelChannels)
|
||||
ids := make([]int64, 0, l)
|
||||
cns := make([]*channel, 0, min(l, EntryPerPage))
|
||||
|
||||
for k := range c.topLevelChannels {
|
||||
ids = append(ids, k)
|
||||
}
|
||||
sort.Sort(int64Slice(ids))
|
||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
||||
count := 0
|
||||
var end bool
|
||||
var t []*ChannelMetric
|
||||
for i, v := range ids[idx:] {
|
||||
if count == EntryPerPage {
|
||||
break
|
||||
}
|
||||
if cn, ok := c.channels[v]; ok {
|
||||
cns = append(cns, cn)
|
||||
t = append(t, &ChannelMetric{
|
||||
NestedChans: copyMap(cn.nestedChans),
|
||||
SubChans: copyMap(cn.subChans),
|
||||
})
|
||||
count++
|
||||
}
|
||||
if i == len(ids[idx:])-1 {
|
||||
end = true
|
||||
break
|
||||
}
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
if count == 0 {
|
||||
end = true
|
||||
}
|
||||
|
||||
for i, cn := range cns {
|
||||
t[i].ChannelData = cn.c.ChannelzMetric()
|
||||
t[i].ID = cn.id
|
||||
t[i].RefName = cn.refName
|
||||
}
|
||||
return t, end
|
||||
}
|
||||
|
||||
func (c *channelMap) GetServers(id int64) ([]*ServerMetric, bool) {
|
||||
c.mu.RLock()
|
||||
l := len(c.servers)
|
||||
ids := make([]int64, 0, l)
|
||||
ss := make([]*server, 0, min(l, EntryPerPage))
|
||||
for k := range c.servers {
|
||||
ids = append(ids, k)
|
||||
}
|
||||
sort.Sort(int64Slice(ids))
|
||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
||||
count := 0
|
||||
var end bool
|
||||
var s []*ServerMetric
|
||||
for i, v := range ids[idx:] {
|
||||
if count == EntryPerPage {
|
||||
break
|
||||
}
|
||||
if svr, ok := c.servers[v]; ok {
|
||||
ss = append(ss, svr)
|
||||
s = append(s, &ServerMetric{
|
||||
ListenSockets: copyMap(svr.listenSockets),
|
||||
})
|
||||
count++
|
||||
}
|
||||
if i == len(ids[idx:])-1 {
|
||||
end = true
|
||||
break
|
||||
}
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
if count == 0 {
|
||||
end = true
|
||||
}
|
||||
|
||||
for i, svr := range ss {
|
||||
s[i].ServerData = svr.s.ChannelzMetric()
|
||||
s[i].ID = svr.id
|
||||
s[i].RefName = svr.refName
|
||||
}
|
||||
return s, end
|
||||
}
|
||||
|
||||
func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
|
||||
var svr *server
|
||||
var ok bool
|
||||
c.mu.RLock()
|
||||
if svr, ok = c.servers[id]; !ok {
|
||||
// server with id doesn't exist.
|
||||
c.mu.RUnlock()
|
||||
return nil, true
|
||||
}
|
||||
svrskts := svr.sockets
|
||||
l := len(svrskts)
|
||||
ids := make([]int64, 0, l)
|
||||
sks := make([]*normalSocket, 0, min(l, EntryPerPage))
|
||||
for k := range svrskts {
|
||||
ids = append(ids, k)
|
||||
}
|
||||
sort.Sort(int64Slice(ids))
// Include only sockets with id at or above startID, per the doc comment on GetServerSockets.
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
|
||||
count := 0
|
||||
var end bool
|
||||
for i, v := range ids[idx:] {
|
||||
if count == EntryPerPage {
|
||||
break
|
||||
}
|
||||
if ns, ok := c.normalSockets[v]; ok {
|
||||
sks = append(sks, ns)
|
||||
count++
|
||||
}
|
||||
if i == len(ids[idx:])-1 {
|
||||
end = true
|
||||
break
|
||||
}
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
if count == 0 {
|
||||
end = true
|
||||
}
|
||||
var s []*SocketMetric
|
||||
for _, ns := range sks {
|
||||
sm := &SocketMetric{}
|
||||
sm.SocketData = ns.s.ChannelzMetric()
|
||||
sm.ID = ns.id
|
||||
sm.RefName = ns.refName
|
||||
s = append(s, sm)
|
||||
}
|
||||
return s, end
|
||||
}
|
||||
|
||||
func (c *channelMap) GetChannel(id int64) *ChannelMetric {
|
||||
cm := &ChannelMetric{}
|
||||
var cn *channel
|
||||
var ok bool
|
||||
c.mu.RLock()
|
||||
if cn, ok = c.channels[id]; !ok {
|
||||
// channel with id doesn't exist.
|
||||
c.mu.RUnlock()
|
||||
return nil
|
||||
}
|
||||
cm.NestedChans = copyMap(cn.nestedChans)
|
||||
cm.SubChans = copyMap(cn.subChans)
|
||||
c.mu.RUnlock()
|
||||
cm.ChannelData = cn.c.ChannelzMetric()
|
||||
cm.ID = cn.id
|
||||
cm.RefName = cn.refName
|
||||
return cm
|
||||
}
|
||||
|
||||
func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
|
||||
cm := &SubChannelMetric{}
|
||||
var sc *subChannel
|
||||
var ok bool
|
||||
c.mu.RLock()
|
||||
if sc, ok = c.subChannels[id]; !ok {
|
||||
// subchannel with id doesn't exist.
|
||||
c.mu.RUnlock()
|
||||
return nil
|
||||
}
|
||||
cm.Sockets = copyMap(sc.sockets)
|
||||
c.mu.RUnlock()
|
||||
cm.ChannelData = sc.c.ChannelzMetric()
|
||||
cm.ID = sc.id
|
||||
cm.RefName = sc.refName
|
||||
return cm
|
||||
}
|
||||
|
||||
func (c *channelMap) GetSocket(id int64) *SocketMetric {
|
||||
sm := &SocketMetric{}
|
||||
c.mu.RLock()
|
||||
if ls, ok := c.listenSockets[id]; ok {
|
||||
c.mu.RUnlock()
|
||||
sm.SocketData = ls.s.ChannelzMetric()
|
||||
sm.ID = ls.id
|
||||
sm.RefName = ls.refName
|
||||
return sm
|
||||
}
|
||||
if ns, ok := c.normalSockets[id]; ok {
|
||||
c.mu.RUnlock()
|
||||
sm.SocketData = ns.s.ChannelzMetric()
|
||||
sm.ID = ns.id
|
||||
sm.RefName = ns.refName
|
||||
return sm
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
type idGenerator struct {
|
||||
id int64
|
||||
}
|
||||
|
||||
func (i *idGenerator) reset() {
|
||||
atomic.StoreInt64(&i.id, 0)
|
||||
}
|
||||
|
||||
func (i *idGenerator) genID() int64 {
|
||||
return atomic.AddInt64(&i.id, 1)
|
||||
}
|
|
@@ -0,0 +1,419 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
// entry represents a node in the channelz database.
|
||||
type entry interface {
|
||||
// addChild adds a child e, whose channelz id is id to child list
|
||||
addChild(id int64, e entry)
|
||||
// deleteChild deletes a child with channelz id to be id from child list
|
||||
deleteChild(id int64)
|
||||
// triggerDelete tries to delete self from channelz database. However, if child
|
||||
// list is not empty, then deletion from the database is on hold until the last
|
||||
// child is deleted from database.
|
||||
triggerDelete()
|
||||
// deleteSelfIfReady check whether triggerDelete() has been called before, and whether child
|
||||
// list is now empty. If both conditions are met, then delete self from database.
|
||||
deleteSelfIfReady()
|
||||
}
|
||||
|
||||
// dummyEntry is a fake entry to handle entry not found case.
|
||||
type dummyEntry struct {
|
||||
idNotFound int64
|
||||
}
|
||||
|
||||
func (d *dummyEntry) addChild(id int64, e entry) {
|
||||
// Note: it is possible for a normal program to reach here under a race condition.
// For example, there could be a race between ClientConn.Close() info being propagated
// to addrConn and http2Client: ClientConn.Close() cancels the context, which causes
// http2Client to error out. That error is caught by the transport monitor before
// addrConn.tearDown() is called inside ClientConn.Close(), so the addrConn creates a
// new transport. When the new transport is registered in channelz, its parent addrConn
// may already have been torn down and deleted from channelz tracking, which lands
// execution here.
grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
|
||||
}
|
||||
|
||||
func (d *dummyEntry) deleteChild(id int64) {
|
||||
// It is possible for a normal program to reach here under race condition.
|
||||
// Refer to the example described in addChild().
|
||||
grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
|
||||
}
|
||||
|
||||
func (d *dummyEntry) triggerDelete() {
|
||||
grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
|
||||
}
|
||||
|
||||
func (*dummyEntry) deleteSelfIfReady() {
|
||||
// code should not reach here. deleteSelfIfReady is always called on an existing entry.
|
||||
}
|
||||
|
||||
// ChannelMetric defines the info channelz provides for a specific Channel, which
|
||||
// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
|
||||
// child list, etc.
|
||||
type ChannelMetric struct {
|
||||
// ID is the channelz id of this channel.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this channel.
|
||||
RefName string
|
||||
// ChannelData contains channel internal metric reported by the channel through
|
||||
// ChannelzMetric().
|
||||
ChannelData *ChannelInternalMetric
|
||||
// NestedChans tracks the nested channel type children of this channel in the format of
|
||||
// a map from nested channel channelz id to corresponding reference string.
|
||||
NestedChans map[int64]string
|
||||
// SubChans tracks the subchannel type children of this channel in the format of a
|
||||
// map from subchannel channelz id to corresponding reference string.
|
||||
SubChans map[int64]string
|
||||
// Sockets tracks the socket type children of this channel in the format of a map
|
||||
// from socket channelz id to corresponding reference string.
|
||||
// Note that the current grpc implementation doesn't allow a channel to have sockets directly;
// therefore, this field is unused.
|
||||
Sockets map[int64]string
|
||||
}
|
||||
|
||||
// SubChannelMetric defines the info channelz provides for a specific SubChannel,
|
||||
// which includes ChannelInternalMetric and channelz-specific data, such as
|
||||
// channelz id, child list, etc.
|
||||
type SubChannelMetric struct {
|
||||
// ID is the channelz id of this subchannel.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this subchannel.
|
||||
RefName string
|
||||
// ChannelData contains subchannel internal metric reported by the subchannel
|
||||
// through ChannelzMetric().
|
||||
ChannelData *ChannelInternalMetric
|
||||
// NestedChans tracks the nested channel type children of this subchannel in the format of
|
||||
// a map from nested channel channelz id to corresponding reference string.
|
||||
// Note current grpc implementation doesn't allow subchannel to have nested channels
|
||||
// as children, therefore, this field is unused.
|
||||
NestedChans map[int64]string
|
||||
// SubChans tracks the subchannel type children of this subchannel in the format of a
|
||||
// map from subchannel channelz id to corresponding reference string.
|
||||
// Note current grpc implementation doesn't allow subchannel to have subchannels
|
||||
// as children, therefore, this field is unused.
|
||||
SubChans map[int64]string
|
||||
// Sockets tracks the socket type children of this subchannel in the format of a map
|
||||
// from socket channelz id to corresponding reference string.
|
||||
Sockets map[int64]string
|
||||
}
|
||||
|
||||
// ChannelInternalMetric defines the struct that the implementor of Channel interface
|
||||
// should return from ChannelzMetric().
|
||||
type ChannelInternalMetric struct {
|
||||
// current connectivity state of the channel.
|
||||
State connectivity.State
|
||||
// The target this channel originally tried to connect to. May be absent
|
||||
Target string
|
||||
// The number of calls started on the channel.
|
||||
CallsStarted int64
|
||||
// The number of calls that have completed with an OK status.
|
||||
CallsSucceeded int64
|
||||
// The number of calls that have completed with a non-OK status.
|
||||
CallsFailed int64
|
||||
// The last time a call was started on the channel.
|
||||
LastCallStartedTimestamp time.Time
|
||||
//TODO: trace
|
||||
}
|
||||
|
||||
// Channel is the interface that should be satisfied in order to be tracked by
|
||||
// channelz as Channel or SubChannel.
|
||||
type Channel interface {
|
||||
ChannelzMetric() *ChannelInternalMetric
|
||||
}
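// Illustrative only (not part of this file): a type becomes trackable by channelz
// simply by reporting its current counters from ChannelzMetric; fakeChannel and its
// field values are hypothetical.
//
//	type fakeChannel struct {
//		calls int64
//	}
//
//	func (f *fakeChannel) ChannelzMetric() *ChannelInternalMetric {
//		return &ChannelInternalMetric{
//			State:        connectivity.Ready,
//			Target:       "dns:///example.com:443",
//			CallsStarted: f.calls,
//		}
//	}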
|
||||
|
||||
type channel struct {
|
||||
refName string
|
||||
c Channel
|
||||
closeCalled bool
|
||||
nestedChans map[int64]string
|
||||
subChans map[int64]string
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
}
|
||||
|
||||
func (c *channel) addChild(id int64, e entry) {
|
||||
switch v := e.(type) {
|
||||
case *subChannel:
|
||||
c.subChans[id] = v.refName
|
||||
case *channel:
|
||||
c.nestedChans[id] = v.refName
|
||||
default:
|
||||
grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *channel) deleteChild(id int64) {
|
||||
delete(c.subChans, id)
|
||||
delete(c.nestedChans, id)
|
||||
c.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (c *channel) triggerDelete() {
|
||||
c.closeCalled = true
|
||||
c.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (c *channel) deleteSelfIfReady() {
|
||||
if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
|
||||
return
|
||||
}
|
||||
c.cm.deleteEntry(c.id)
|
||||
// not top channel
|
||||
if c.pid != 0 {
|
||||
c.cm.findEntry(c.pid).deleteChild(c.id)
|
||||
}
|
||||
}
|
||||
|
||||
type subChannel struct {
|
||||
refName string
|
||||
c Channel
|
||||
closeCalled bool
|
||||
sockets map[int64]string
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
}
|
||||
|
||||
func (sc *subChannel) addChild(id int64, e entry) {
|
||||
if v, ok := e.(*normalSocket); ok {
|
||||
sc.sockets[id] = v.refName
|
||||
} else {
|
||||
grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
func (sc *subChannel) deleteChild(id int64) {
|
||||
delete(sc.sockets, id)
|
||||
sc.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (sc *subChannel) triggerDelete() {
|
||||
sc.closeCalled = true
|
||||
sc.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (sc *subChannel) deleteSelfIfReady() {
|
||||
if !sc.closeCalled || len(sc.sockets) != 0 {
|
||||
return
|
||||
}
|
||||
sc.cm.deleteEntry(sc.id)
|
||||
sc.cm.findEntry(sc.pid).deleteChild(sc.id)
|
||||
}
|
||||
|
||||
// SocketMetric defines the info channelz provides for a specific Socket, which
|
||||
// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc.
|
||||
type SocketMetric struct {
|
||||
// ID is the channelz id of this socket.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this socket.
|
||||
RefName string
|
||||
// SocketData contains socket internal metric reported by the socket through
|
||||
// ChannelzMetric().
|
||||
SocketData *SocketInternalMetric
|
||||
}
|
||||
|
||||
// SocketInternalMetric defines the struct that the implementor of Socket interface
|
||||
// should return from ChannelzMetric().
|
||||
type SocketInternalMetric struct {
|
||||
// The number of streams that have been started.
|
||||
StreamsStarted int64
|
||||
// The number of streams that have ended successfully:
|
||||
// On client side, receiving frame with eos bit set.
|
||||
// On server side, sending frame with eos bit set.
|
||||
StreamsSucceeded int64
|
||||
// The number of streams that have ended unsuccessfully:
|
||||
// On client side, termination without receiving frame with eos bit set.
|
||||
// On server side, termination without sending frame with eos bit set.
|
||||
StreamsFailed int64
|
||||
// The number of messages successfully sent on this socket.
|
||||
MessagesSent int64
|
||||
MessagesReceived int64
|
||||
// The number of keep alives sent. This is typically implemented with HTTP/2
|
||||
// ping messages.
|
||||
KeepAlivesSent int64
|
||||
// The last time a stream was created by this endpoint. Usually unset for
|
||||
// servers.
|
||||
LastLocalStreamCreatedTimestamp time.Time
|
||||
// The last time a stream was created by the remote endpoint. Usually unset
|
||||
// for clients.
|
||||
LastRemoteStreamCreatedTimestamp time.Time
|
||||
// The last time a message was sent by this endpoint.
|
||||
LastMessageSentTimestamp time.Time
|
||||
// The last time a message was received by this endpoint.
|
||||
LastMessageReceivedTimestamp time.Time
|
||||
// The amount of window, granted to the local endpoint by the remote endpoint.
|
||||
// This may be slightly out of date due to network latency. This does NOT
|
||||
// include stream level or TCP level flow control info.
|
||||
LocalFlowControlWindow int64
|
||||
// The amount of window, granted to the remote endpoint by the local endpoint.
|
||||
// This may be slightly out of date due to network latency. This does NOT
|
||||
// include stream level or TCP level flow control info.
|
||||
RemoteFlowControlWindow int64
|
||||
// The locally bound address.
|
||||
LocalAddr net.Addr
|
||||
// The remote bound address. May be absent.
|
||||
RemoteAddr net.Addr
|
||||
// Optional, represents the name of the remote endpoint, if different than
|
||||
// the original target name.
|
||||
RemoteName string
|
||||
SocketOptions *SocketOptionData
|
||||
Security credentials.ChannelzSecurityValue
|
||||
}
|
||||
|
||||
// Socket is the interface that should be satisfied in order to be tracked by
|
||||
// channelz as Socket.
|
||||
type Socket interface {
|
||||
ChannelzMetric() *SocketInternalMetric
|
||||
}
|
||||
|
||||
type listenSocket struct {
|
||||
refName string
|
||||
s Socket
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
}
|
||||
|
||||
func (ls *listenSocket) addChild(id int64, e entry) {
|
||||
grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
|
||||
}
|
||||
|
||||
func (ls *listenSocket) deleteChild(id int64) {
|
||||
grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
|
||||
}
|
||||
|
||||
func (ls *listenSocket) triggerDelete() {
|
||||
ls.cm.deleteEntry(ls.id)
|
||||
ls.cm.findEntry(ls.pid).deleteChild(ls.id)
|
||||
}
|
||||
|
||||
func (ls *listenSocket) deleteSelfIfReady() {
|
||||
grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
|
||||
}
|
||||
|
||||
type normalSocket struct {
|
||||
refName string
|
||||
s Socket
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
}
|
||||
|
||||
func (ns *normalSocket) addChild(id int64, e entry) {
|
||||
grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
|
||||
}
|
||||
|
||||
func (ns *normalSocket) deleteChild(id int64) {
|
||||
grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
|
||||
}
|
||||
|
||||
func (ns *normalSocket) triggerDelete() {
|
||||
ns.cm.deleteEntry(ns.id)
|
||||
ns.cm.findEntry(ns.pid).deleteChild(ns.id)
|
||||
}
|
||||
|
||||
func (ns *normalSocket) deleteSelfIfReady() {
|
||||
grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
|
||||
}
|
||||
|
||||
// ServerMetric defines the info channelz provides for a specific Server, which
|
||||
// includes ServerInternalMetric and channelz-specific data, such as channelz id,
|
||||
// child list, etc.
|
||||
type ServerMetric struct {
|
||||
// ID is the channelz id of this server.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this server.
|
||||
RefName string
|
||||
// ServerData contains server internal metric reported by the server through
|
||||
// ChannelzMetric().
|
||||
ServerData *ServerInternalMetric
|
||||
// ListenSockets tracks the listener socket type children of this server in the
|
||||
// format of a map from socket channelz id to corresponding reference string.
|
||||
ListenSockets map[int64]string
|
||||
}
|
||||
|
||||
// ServerInternalMetric defines the struct that the implementor of Server interface
|
||||
// should return from ChannelzMetric().
|
||||
type ServerInternalMetric struct {
|
||||
// The number of incoming calls started on the server.
|
||||
CallsStarted int64
|
||||
// The number of incoming calls that have completed with an OK status.
|
||||
CallsSucceeded int64
|
||||
// The number of incoming calls that have a completed with a non-OK status.
|
||||
CallsFailed int64
|
||||
// The last time a call was started on the server.
|
||||
LastCallStartedTimestamp time.Time
|
||||
//TODO: trace
|
||||
}
|
||||
|
||||
// Server is the interface to be satisfied in order to be tracked by channelz as
|
||||
// Server.
|
||||
type Server interface {
|
||||
ChannelzMetric() *ServerInternalMetric
|
||||
}
|
||||
|
||||
type server struct {
|
||||
refName string
|
||||
s Server
|
||||
closeCalled bool
|
||||
sockets map[int64]string
|
||||
listenSockets map[int64]string
|
||||
id int64
|
||||
cm *channelMap
|
||||
}
|
||||
|
||||
func (s *server) addChild(id int64, e entry) {
|
||||
switch v := e.(type) {
|
||||
case *normalSocket:
|
||||
s.sockets[id] = v.refName
|
||||
case *listenSocket:
|
||||
s.listenSockets[id] = v.refName
|
||||
default:
|
||||
grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *server) deleteChild(id int64) {
|
||||
delete(s.sockets, id)
|
||||
delete(s.listenSockets, id)
|
||||
s.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (s *server) triggerDelete() {
|
||||
s.closeCalled = true
|
||||
s.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (s *server) deleteSelfIfReady() {
|
||||
if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
|
||||
return
|
||||
}
|
||||
s.cm.deleteEntry(s.id)
|
||||
}
|
|
@ -0,0 +1,54 @@
|
|||
// +build !appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// SocketOptionData defines the struct to hold socket option data, and related
|
||||
// getter function to obtain info from fd.
|
||||
type SocketOptionData struct {
|
||||
Linger *unix.Linger
|
||||
RecvTimeout *unix.Timeval
|
||||
SendTimeout *unix.Timeval
|
||||
TCPInfo *unix.TCPInfo
|
||||
}
|
||||
|
||||
// Getsockopt defines the function to get socket options requested by channelz.
|
||||
// It is to be passed to syscall.RawConn.Control().
|
||||
func (s *SocketOptionData) Getsockopt(fd uintptr) {
|
||||
if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil {
|
||||
s.Linger = v
|
||||
}
|
||||
if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil {
|
||||
s.RecvTimeout = v
|
||||
}
|
||||
if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil {
|
||||
s.SendTimeout = v
|
||||
}
|
||||
if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil {
|
||||
s.TCPInfo = v
|
||||
}
|
||||
return
|
||||
}
|
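For reference, a minimal hedged sketch (not taken from the vendored code) of how a callback like Getsockopt above is typically driven through syscall.RawConn.Control; the conn value, the helper name, and the extra "net" import are assumptions, and *net.TCPConn satisfies syscall.Conn only on Go 1.9+.

// sketchSocketOptions shows one way a *SocketOptionData could be filled in
// from an established TCP connection inside package channelz.
func sketchSocketOptions(conn *net.TCPConn) *SocketOptionData {
	raw, err := conn.SyscallConn() // available because *net.TCPConn implements syscall.Conn
	if err != nil {
		return nil
	}
	data := &SocketOptionData{}
	// Control invokes the callback with the connection's raw file descriptor,
	// which is exactly the argument Getsockopt above expects.
	if err := raw.Control(data.Getsockopt); err != nil {
		return nil
	}
	return data
}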
|
@ -0,0 +1,38 @@
|
|||
// +build !linux appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import "google.golang.org/grpc/grpclog"
|
||||
|
||||
func init() {
|
||||
grpclog.Infof("Channelz: socket options are not supported on non-linux os and appengine.")
|
||||
}
|
||||
|
||||
// SocketOptionData defines the struct to hold socket option data, and related
|
||||
// getter function to obtain info from fd.
|
||||
// Socket options are not supported on non-linux platforms or on App Engine.
|
||||
type SocketOptionData struct {
|
||||
}
|
||||
|
||||
// Getsockopt defines the function to get socket options requested by channelz.
|
||||
// It is to be passed to syscall.RawConn.Control().
|
||||
// Socket options are not supported on non-linux platforms or on App Engine.
|
||||
func (s *SocketOptionData) Getsockopt(fd uintptr) {}
|
vendor/google.golang.org/grpc/internal/channelz/util_linux_go19.go (generated, vendored, new file, 39 lines)
|
@ -0,0 +1,39 @@
|
|||
// +build linux,go1.9,!appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// GetSocketOption gets the socket option info of the conn.
|
||||
func GetSocketOption(socket interface{}) *SocketOptionData {
|
||||
c, ok := socket.(syscall.Conn)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
data := &SocketOptionData{}
|
||||
if rawConn, err := c.SyscallConn(); err == nil {
|
||||
rawConn.Control(data.Getsockopt)
|
||||
return data
|
||||
}
|
||||
return nil
|
||||
}
|
vendor/google.golang.org/grpc/internal/channelz/util_nonlinux_pre_go19.go (generated, vendored, new file, 26 lines)
|
@ -0,0 +1,26 @@
|
|||
// +build !linux !go1.9 appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
// GetSocketOption gets the socket option info of the conn.
|
||||
func GetSocketOption(c interface{}) *SocketOptionData {
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,35 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package envconfig contains grpc settings configured by environment variables.
|
||||
package envconfig
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
prefix = "GRPC_GO_"
|
||||
retryStr = prefix + "RETRY"
|
||||
)
|
||||
|
||||
var (
|
||||
// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
|
||||
Retry = strings.EqualFold(os.Getenv(retryStr), "on")
|
||||
)
|
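A hedged usage sketch of the environment gate defined above, assuming code that lives inside the grpc module (the internal/envconfig package is not importable from outside it); the printed messages are purely illustrative.

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/envconfig"
)

func main() {
	// envconfig.Retry is true only when GRPC_GO_RETRY=on (case-insensitive)
	// was present in the environment at process start.
	if envconfig.Retry {
		fmt.Println("gRPC retry support enabled via GRPC_GO_RETRY=on")
	} else {
		fmt.Println("gRPC retry support disabled")
	}
}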
|
@ -0,0 +1,56 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package grpcrand implements math/rand functions in a concurrent-safe way
|
||||
// with a global random source, independent of math/rand's global source.
|
||||
package grpcrand
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
r = rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
mu sync.Mutex
|
||||
)
|
||||
|
||||
// Int63n implements rand.Int63n on the grpcrand global source.
|
||||
func Int63n(n int64) int64 {
|
||||
mu.Lock()
|
||||
res := r.Int63n(n)
|
||||
mu.Unlock()
|
||||
return res
|
||||
}
|
||||
|
||||
// Intn implements rand.Intn on the grpcrand global source.
|
||||
func Intn(n int) int {
|
||||
mu.Lock()
|
||||
res := r.Intn(n)
|
||||
mu.Unlock()
|
||||
return res
|
||||
}
|
||||
|
||||
// Float64 implements rand.Float64 on the grpcrand global source.
|
||||
func Float64() float64 {
|
||||
mu.Lock()
|
||||
res := r.Float64()
|
||||
mu.Unlock()
|
||||
return res
|
||||
}
|
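A small hedged sketch of why the mutex-guarded global source matters: many goroutines can draw random jitter concurrently without racing on math/rand's state. The jitter factor and the internal import (only valid inside the grpc module) are assumptions for illustration.

package main

import (
	"sync"
	"time"

	"google.golang.org/grpc/internal/grpcrand"
)

// jittered scales d by a random factor in [0.8, 1.2); it is safe to call from
// many goroutines because grpcrand serializes access to its source.
func jittered(d time.Duration) time.Duration {
	f := 0.8 + 0.4*grpcrand.Float64()
	return time.Duration(float64(d) * f)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = jittered(time.Second)
		}()
	}
	wg.Wait()
}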
|
@ -15,20 +15,22 @@
|
|||
*
|
||||
*/
|
||||
|
||||
// Package internal contains gRPC-internal code for testing, to avoid polluting
|
||||
// the godoc of the top-level grpc package.
|
||||
// Package internal contains gRPC-internal code, to avoid polluting
|
||||
// the godoc of the top-level grpc package. It must not import any grpc
|
||||
// symbols to avoid circular dependencies.
|
||||
package internal
|
||||
|
||||
// TestingCloseConns closes all existing transports but keeps
|
||||
// grpcServer.lis accepting new connections.
|
||||
//
|
||||
// The provided grpcServer must be of type *grpc.Server. It is untyped
|
||||
// for circular dependency reasons.
|
||||
var TestingCloseConns func(grpcServer interface{})
|
||||
var (
|
||||
|
||||
// TestingUseHandlerImpl enables the http.Handler-based server implementation.
|
||||
// It must be called before Serve and requires TLS credentials.
|
||||
//
|
||||
// The provided grpcServer must be of type *grpc.Server. It is untyped
|
||||
// for circular dependency reasons.
|
||||
var TestingUseHandlerImpl func(grpcServer interface{})
|
||||
TestingUseHandlerImpl func(grpcServer interface{})
|
||||
|
||||
// WithContextDialer is exported by clientconn.go
|
||||
WithContextDialer interface{} // func(context.Context, string) (net.Conn, error) grpc.DialOption
|
||||
// WithResolverBuilder is exported by clientconn.go
|
||||
WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption
|
||||
)
|
||||
|
|
|
@ -41,12 +41,9 @@ const (
|
|||
gamma = 2
|
||||
)
|
||||
|
||||
var (
|
||||
// Adding arbitrary data to ping so that its ack can be
|
||||
// identified.
|
||||
// Adding arbitrary data to ping so that its ack can be identified.
|
||||
// Easter-egg: what does the ping message say?
|
||||
bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
|
||||
)
|
||||
var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
|
||||
|
||||
type bdpEstimator struct {
|
||||
// sentAt is the time when the ping was sent.
|
||||
|
@ -59,7 +56,7 @@ type bdpEstimator struct {
|
|||
sample uint32
|
||||
// bwMax is the maximum bandwidth noted so far (bytes/sec).
|
||||
bwMax float64
|
||||
// bool to keep track of the begining of a new measurement cycle.
|
||||
// bool to keep track of the beginning of a new measurement cycle.
|
||||
isSent bool
|
||||
// Callback to update the window sizes.
|
||||
updateFlowControl func(n uint32)
|
||||
|
@ -70,7 +67,7 @@ type bdpEstimator struct {
|
|||
}
|
||||
|
||||
// timesnap registers the time bdp ping was sent out so that
|
||||
// network rtt can be calculated when its ack is recieved.
|
||||
// network rtt can be calculated when its ack is received.
|
||||
// It is called (by controller) when the bdpPing is
|
||||
// being written on the wire.
|
||||
func (b *bdpEstimator) timesnap(d [8]byte) {
|
||||
|
@ -119,7 +116,7 @@ func (b *bdpEstimator) calculate(d [8]byte) {
|
|||
b.rtt += (rttSample - b.rtt) * float64(alpha)
|
||||
}
|
||||
b.isSent = false
|
||||
// The number of bytes accumalated so far in the sample is smaller
|
||||
// The number of bytes accumulated so far in the sample is smaller
|
||||
// than or equal to 1.5 times the real BDP on a saturated connection.
|
||||
bwCurrent := float64(b.sample) / (b.rtt * float64(1.5))
|
||||
if bwCurrent > b.bwMax {
|
|
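To make the bandwidth estimate above concrete, a quick worked example with made-up numbers: if b.sample is 1,500,000 bytes accumulated between sending the bdp ping and receiving its ack, and the smoothed RTT b.rtt is 0.1 seconds, then bwCurrent = 1,500,000 / (0.1 * 1.5) = 10,000,000 bytes/sec (roughly 10 MB/s), and bwMax is raised to that value if it was previously smaller.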
@ -0,0 +1,856 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
)
|
||||
|
||||
var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
|
||||
e.SetMaxDynamicTableSizeLimit(v)
|
||||
}
|
||||
|
||||
type itemNode struct {
|
||||
it interface{}
|
||||
next *itemNode
|
||||
}
|
||||
|
||||
type itemList struct {
|
||||
head *itemNode
|
||||
tail *itemNode
|
||||
}
|
||||
|
||||
func (il *itemList) enqueue(i interface{}) {
|
||||
n := &itemNode{it: i}
|
||||
if il.tail == nil {
|
||||
il.head, il.tail = n, n
|
||||
return
|
||||
}
|
||||
il.tail.next = n
|
||||
il.tail = n
|
||||
}
|
||||
|
||||
// peek returns the first item in the list without removing it from the
|
||||
// list.
|
||||
func (il *itemList) peek() interface{} {
|
||||
return il.head.it
|
||||
}
|
||||
|
||||
func (il *itemList) dequeue() interface{} {
|
||||
if il.head == nil {
|
||||
return nil
|
||||
}
|
||||
i := il.head.it
|
||||
il.head = il.head.next
|
||||
if il.head == nil {
|
||||
il.tail = nil
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
func (il *itemList) dequeueAll() *itemNode {
|
||||
h := il.head
|
||||
il.head, il.tail = nil, nil
|
||||
return h
|
||||
}
|
||||
|
||||
func (il *itemList) isEmpty() bool {
|
||||
return il.head == nil
|
||||
}
|
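A tiny hedged sketch of the FIFO behavior this list provides; the values are arbitrary and the helper name is an assumption.

func exampleItemList() {
	il := &itemList{}
	il.enqueue("a")
	il.enqueue("b")
	_ = il.peek()    // "a": peek does not remove the head
	_ = il.dequeue() // "a"
	_ = il.dequeue() // "b"
	_ = il.isEmpty() // true; a further dequeue would return nil
}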
||||
|
||||
// The following defines various control items which could flow through
|
||||
// the control buffer of transport. They represent different aspects of
|
||||
// control tasks, e.g., flow control, settings, streaming resetting, etc.
|
||||
|
||||
// registerStream is used to register an incoming stream with loopy writer.
|
||||
type registerStream struct {
|
||||
streamID uint32
|
||||
wq *writeQuota
|
||||
}
|
||||
|
||||
// headerFrame is also used to register a stream on the client-side.
|
||||
type headerFrame struct {
|
||||
streamID uint32
|
||||
hf []hpack.HeaderField
|
||||
endStream bool // Valid on server side.
|
||||
initStream func(uint32) (bool, error) // Used only on the client side.
|
||||
onWrite func()
|
||||
wq *writeQuota // write quota for the stream created.
|
||||
cleanup *cleanupStream // Valid on the server side.
|
||||
onOrphaned func(error) // Valid on client-side
|
||||
}
|
||||
|
||||
type cleanupStream struct {
|
||||
streamID uint32
|
||||
idPtr *uint32
|
||||
rst bool
|
||||
rstCode http2.ErrCode
|
||||
onWrite func()
|
||||
}
|
||||
|
||||
type dataFrame struct {
|
||||
streamID uint32
|
||||
endStream bool
|
||||
h []byte
|
||||
d []byte
|
||||
// onEachWrite is called every time
|
||||
// a part of d is written out.
|
||||
onEachWrite func()
|
||||
}
|
||||
|
||||
type incomingWindowUpdate struct {
|
||||
streamID uint32
|
||||
increment uint32
|
||||
}
|
||||
|
||||
type outgoingWindowUpdate struct {
|
||||
streamID uint32
|
||||
increment uint32
|
||||
}
|
||||
|
||||
type incomingSettings struct {
|
||||
ss []http2.Setting
|
||||
}
|
||||
|
||||
type outgoingSettings struct {
|
||||
ss []http2.Setting
|
||||
}
|
||||
|
||||
type settingsAck struct {
|
||||
}
|
||||
|
||||
type incomingGoAway struct {
|
||||
}
|
||||
|
||||
type goAway struct {
|
||||
code http2.ErrCode
|
||||
debugData []byte
|
||||
headsUp bool
|
||||
closeConn bool
|
||||
}
|
||||
|
||||
type ping struct {
|
||||
ack bool
|
||||
data [8]byte
|
||||
}
|
||||
|
||||
type outFlowControlSizeRequest struct {
|
||||
resp chan uint32
|
||||
}
|
||||
|
||||
type outStreamState int
|
||||
|
||||
const (
|
||||
active outStreamState = iota
|
||||
empty
|
||||
waitingOnStreamQuota
|
||||
)
|
||||
|
||||
type outStream struct {
|
||||
id uint32
|
||||
state outStreamState
|
||||
itl *itemList
|
||||
bytesOutStanding int
|
||||
wq *writeQuota
|
||||
|
||||
next *outStream
|
||||
prev *outStream
|
||||
}
|
||||
|
||||
func (s *outStream) deleteSelf() {
|
||||
if s.prev != nil {
|
||||
s.prev.next = s.next
|
||||
}
|
||||
if s.next != nil {
|
||||
s.next.prev = s.prev
|
||||
}
|
||||
s.next, s.prev = nil, nil
|
||||
}
|
||||
|
||||
type outStreamList struct {
|
||||
// Following are sentinel objects that mark the
|
||||
// beginning and end of the list. They do not
|
||||
// contain any item lists. All valid objects are
|
||||
// inserted in between them.
|
||||
// This is needed so that an outStream object can
|
||||
// deleteSelf() in O(1) time without knowing which
|
||||
// list it belongs to.
|
||||
head *outStream
|
||||
tail *outStream
|
||||
}
|
||||
|
||||
func newOutStreamList() *outStreamList {
|
||||
head, tail := new(outStream), new(outStream)
|
||||
head.next = tail
|
||||
tail.prev = head
|
||||
return &outStreamList{
|
||||
head: head,
|
||||
tail: tail,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *outStreamList) enqueue(s *outStream) {
|
||||
e := l.tail.prev
|
||||
e.next = s
|
||||
s.prev = e
|
||||
s.next = l.tail
|
||||
l.tail.prev = s
|
||||
}
|
||||
|
||||
// remove from the beginning of the list.
|
||||
func (l *outStreamList) dequeue() *outStream {
|
||||
b := l.head.next
|
||||
if b == l.tail {
|
||||
return nil
|
||||
}
|
||||
b.deleteSelf()
|
||||
return b
|
||||
}
|
||||
|
||||
// controlBuffer is a way to pass information to loopy.
|
||||
// Information is passed as specific struct types called control frames.
|
||||
// A control frame not only represents data, messages or headers to be sent out
|
||||
// but can also be used to instruct loopy to update its internal state.
|
||||
// It shouldn't be confused with an HTTP2 frame, although some of the control frames
|
||||
// like dataFrame and headerFrame do go out on the wire as HTTP2 frames.
|
||||
type controlBuffer struct {
|
||||
ch chan struct{}
|
||||
done <-chan struct{}
|
||||
mu sync.Mutex
|
||||
consumerWaiting bool
|
||||
list *itemList
|
||||
err error
|
||||
}
|
||||
|
||||
func newControlBuffer(done <-chan struct{}) *controlBuffer {
|
||||
return &controlBuffer{
|
||||
ch: make(chan struct{}, 1),
|
||||
list: &itemList{},
|
||||
done: done,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *controlBuffer) put(it interface{}) error {
|
||||
_, err := c.executeAndPut(nil, it)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) {
|
||||
var wakeUp bool
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return false, c.err
|
||||
}
|
||||
if f != nil {
|
||||
if !f(it) { // f wasn't successful
|
||||
c.mu.Unlock()
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
if c.consumerWaiting {
|
||||
wakeUp = true
|
||||
c.consumerWaiting = false
|
||||
}
|
||||
c.list.enqueue(it)
|
||||
c.mu.Unlock()
|
||||
if wakeUp {
|
||||
select {
|
||||
case c.ch <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Note argument f should never be nil.
|
||||
func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) {
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return false, c.err
|
||||
}
|
||||
if !f(it) { // f wasn't successful
|
||||
c.mu.Unlock()
|
||||
return false, nil
|
||||
}
|
||||
c.mu.Unlock()
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (c *controlBuffer) get(block bool) (interface{}, error) {
|
||||
for {
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return nil, c.err
|
||||
}
|
||||
if !c.list.isEmpty() {
|
||||
h := c.list.dequeue()
|
||||
c.mu.Unlock()
|
||||
return h, nil
|
||||
}
|
||||
if !block {
|
||||
c.mu.Unlock()
|
||||
return nil, nil
|
||||
}
|
||||
c.consumerWaiting = true
|
||||
c.mu.Unlock()
|
||||
select {
|
||||
case <-c.ch:
|
||||
case <-c.done:
|
||||
c.finish()
|
||||
return nil, ErrConnClosing
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *controlBuffer) finish() {
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return
|
||||
}
|
||||
c.err = ErrConnClosing
|
||||
// There may be headers for streams in the control buffer.
|
||||
// These streams need to be cleaned out since the transport
|
||||
// is not aware of them yet.
|
||||
for head := c.list.dequeueAll(); head != nil; head = head.next {
|
||||
hdr, ok := head.it.(*headerFrame)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if hdr.onOrphaned != nil { // It will be nil on the server-side.
|
||||
hdr.onOrphaned(ErrConnClosing)
|
||||
}
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
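A hedged sketch of the producer/consumer pattern controlBuffer supports: RPC-side goroutines put control frames, while a single loopy goroutine blocks in get(true). The done channel, the helper name, and the frame values are assumptions for illustration.

func exampleControlBuffer(done <-chan struct{}) {
	cbuf := newControlBuffer(done)

	// Producer side (e.g. an RPC goroutine) enqueues a control frame.
	go func() {
		_ = cbuf.put(&outgoingWindowUpdate{streamID: 0, increment: 1024})
	}()

	// Consumer side (loopy) blocks until a frame or an error is available.
	it, err := cbuf.get(true)
	if err != nil {
		return // e.g. ErrConnClosing after finish() has run
	}
	_ = it // dispatch the frame, much as loopyWriter.handle does below
}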
||||
|
||||
type side int
|
||||
|
||||
const (
|
||||
clientSide side = iota
|
||||
serverSide
|
||||
)
|
||||
|
||||
// Loopy receives frames from the control buffer.
|
||||
// Each frame is handled individually; most of the work done by loopy goes
|
||||
// into handling data frames. Loopy maintains a queue of active streams, and each
|
||||
// stream maintains a queue of data frames; as loopy receives data frames
|
||||
// they are added to the queue of the relevant stream.
|
||||
// Loopy goes over this list of active streams by processing one node every iteration,
|
||||
// thereby closely resembling round-robin scheduling over all streams. While
|
||||
// processing a stream, loopy writes out data bytes from this stream capped by the min
|
||||
// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
|
||||
type loopyWriter struct {
|
||||
side side
|
||||
cbuf *controlBuffer
|
||||
sendQuota uint32
|
||||
oiws uint32 // outbound initial window size.
|
||||
// estdStreams is map of all established streams that are not cleaned-up yet.
|
||||
// On client-side, this is all streams whose headers were sent out.
|
||||
// On server-side, this is all streams whose headers were received.
|
||||
estdStreams map[uint32]*outStream // Established streams.
|
||||
// activeStreams is a linked-list of all streams that have data to send and some
|
||||
// stream-level flow control quota.
|
||||
// Each of these streams internally have a list of data items(and perhaps trailers
|
||||
// on the server-side) to be sent out.
|
||||
activeStreams *outStreamList
|
||||
framer *framer
|
||||
hBuf *bytes.Buffer // The buffer for HPACK encoding.
|
||||
hEnc *hpack.Encoder // HPACK encoder.
|
||||
bdpEst *bdpEstimator
|
||||
draining bool
|
||||
|
||||
// Side-specific handlers
|
||||
ssGoAwayHandler func(*goAway) (bool, error)
|
||||
}
|
||||
|
||||
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
|
||||
var buf bytes.Buffer
|
||||
l := &loopyWriter{
|
||||
side: s,
|
||||
cbuf: cbuf,
|
||||
sendQuota: defaultWindowSize,
|
||||
oiws: defaultWindowSize,
|
||||
estdStreams: make(map[uint32]*outStream),
|
||||
activeStreams: newOutStreamList(),
|
||||
framer: fr,
|
||||
hBuf: &buf,
|
||||
hEnc: hpack.NewEncoder(&buf),
|
||||
bdpEst: bdpEst,
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
const minBatchSize = 1000
|
||||
|
||||
// run should be run in a separate goroutine.
|
||||
// It reads control frames from controlBuf and processes them by:
|
||||
// 1. Updating loopy's internal state, or/and
|
||||
// 2. Writing out HTTP2 frames on the wire.
|
||||
//
|
||||
// Loopy keeps all active streams with data to send in a linked-list.
|
||||
// All streams in the activeStreams linked-list must have both:
|
||||
// 1. Data to send, and
|
||||
// 2. Stream level flow control quota available.
|
||||
//
|
||||
// In each iteration of run loop, other than processing the incoming control
|
||||
// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
|
||||
// This results in writing of HTTP2 frames into an underlying write buffer.
|
||||
// When there's no more control frames to read from controlBuf, loopy flushes the write buffer.
|
||||
// As an optimization, to increase the batch size of each flush, loopy yields the processor once
|
||||
// if the batch size is too low, to give stream goroutines a chance to fill it up.
|
||||
func (l *loopyWriter) run() (err error) {
|
||||
defer func() {
|
||||
if err == ErrConnClosing {
|
||||
// Don't log ErrConnClosing as error since it happens
|
||||
// 1. When the connection is closed by some other known issue.
|
||||
// 2. User closed the connection.
|
||||
// 3. A graceful close of connection.
|
||||
infof("transport: loopyWriter.run returning. %v", err)
|
||||
err = nil
|
||||
}
|
||||
}()
|
||||
for {
|
||||
it, err := l.cbuf.get(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = l.handle(it); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = l.processData(); err != nil {
|
||||
return err
|
||||
}
|
||||
gosched := true
|
||||
hasdata:
|
||||
for {
|
||||
it, err := l.cbuf.get(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if it != nil {
|
||||
if err = l.handle(it); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = l.processData(); err != nil {
|
||||
return err
|
||||
}
|
||||
continue hasdata
|
||||
}
|
||||
isEmpty, err := l.processData()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !isEmpty {
|
||||
continue hasdata
|
||||
}
|
||||
if gosched {
|
||||
gosched = false
|
||||
if l.framer.writer.offset < minBatchSize {
|
||||
runtime.Gosched()
|
||||
continue hasdata
|
||||
}
|
||||
}
|
||||
l.framer.writer.Flush()
|
||||
break hasdata
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error {
|
||||
return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
|
||||
}
|
||||
|
||||
func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
|
||||
// A window update on stream 0 updates the connection-level send quota.
|
||||
if w.streamID == 0 {
|
||||
l.sendQuota += w.increment
|
||||
return nil
|
||||
}
|
||||
// Find the stream and update it.
|
||||
if str, ok := l.estdStreams[w.streamID]; ok {
|
||||
str.bytesOutStanding -= int(w.increment)
|
||||
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
|
||||
str.state = active
|
||||
l.activeStreams.enqueue(str)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
|
||||
return l.framer.fr.WriteSettings(s.ss...)
|
||||
}
|
||||
|
||||
func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
|
||||
if err := l.applySettings(s.ss); err != nil {
|
||||
return err
|
||||
}
|
||||
return l.framer.fr.WriteSettingsAck()
|
||||
}
|
||||
|
||||
func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
|
||||
str := &outStream{
|
||||
id: h.streamID,
|
||||
state: empty,
|
||||
itl: &itemList{},
|
||||
wq: h.wq,
|
||||
}
|
||||
l.estdStreams[h.streamID] = str
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) headerHandler(h *headerFrame) error {
|
||||
if l.side == serverSide {
|
||||
str, ok := l.estdStreams[h.streamID]
|
||||
if !ok {
|
||||
warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
|
||||
return nil
|
||||
}
|
||||
// Case 1.A: Server is responding back with headers.
|
||||
if !h.endStream {
|
||||
return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
|
||||
}
|
||||
// else: Case 1.B: Server wants to close stream.
|
||||
|
||||
if str.state != empty { // either active or waiting on stream quota.
|
||||
// Add it to str's list of items.
|
||||
str.itl.enqueue(h)
|
||||
return nil
|
||||
}
|
||||
if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
|
||||
return err
|
||||
}
|
||||
return l.cleanupStreamHandler(h.cleanup)
|
||||
}
|
||||
// Case 2: Client wants to originate stream.
|
||||
str := &outStream{
|
||||
id: h.streamID,
|
||||
state: empty,
|
||||
itl: &itemList{},
|
||||
wq: h.wq,
|
||||
}
|
||||
str.itl.enqueue(h)
|
||||
return l.originateStream(str)
|
||||
}
|
||||
|
||||
func (l *loopyWriter) originateStream(str *outStream) error {
|
||||
hdr := str.itl.dequeue().(*headerFrame)
|
||||
sendPing, err := hdr.initStream(str.id)
|
||||
if err != nil {
|
||||
if err == ErrConnClosing {
|
||||
return err
|
||||
}
|
||||
// Other errors (e.g. errStreamDrain) need not close the transport.
|
||||
return nil
|
||||
}
|
||||
if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
|
||||
return err
|
||||
}
|
||||
l.estdStreams[str.id] = str
|
||||
if sendPing {
|
||||
return l.pingHandler(&ping{data: [8]byte{}})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
|
||||
if onWrite != nil {
|
||||
onWrite()
|
||||
}
|
||||
l.hBuf.Reset()
|
||||
for _, f := range hf {
|
||||
if err := l.hEnc.WriteField(f); err != nil {
|
||||
warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err)
|
||||
}
|
||||
}
|
||||
var (
|
||||
err error
|
||||
endHeaders, first bool
|
||||
)
|
||||
first = true
|
||||
for !endHeaders {
|
||||
size := l.hBuf.Len()
|
||||
if size > http2MaxFrameLen {
|
||||
size = http2MaxFrameLen
|
||||
} else {
|
||||
endHeaders = true
|
||||
}
|
||||
if first {
|
||||
first = false
|
||||
err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{
|
||||
StreamID: streamID,
|
||||
BlockFragment: l.hBuf.Next(size),
|
||||
EndStream: endStream,
|
||||
EndHeaders: endHeaders,
|
||||
})
|
||||
} else {
|
||||
err = l.framer.fr.WriteContinuation(
|
||||
streamID,
|
||||
endHeaders,
|
||||
l.hBuf.Next(size),
|
||||
)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) preprocessData(df *dataFrame) error {
|
||||
str, ok := l.estdStreams[df.streamID]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
// If we got data for a stream it means that
|
||||
// the stream was originated and the headers were sent out.
|
||||
str.itl.enqueue(df)
|
||||
if str.state == empty {
|
||||
str.state = active
|
||||
l.activeStreams.enqueue(str)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) pingHandler(p *ping) error {
|
||||
if !p.ack {
|
||||
l.bdpEst.timesnap(p.data)
|
||||
}
|
||||
return l.framer.fr.WritePing(p.ack, p.data)
|
||||
|
||||
}
|
||||
|
||||
func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
|
||||
o.resp <- l.sendQuota
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
|
||||
c.onWrite()
|
||||
if str, ok := l.estdStreams[c.streamID]; ok {
|
||||
// On the server side it could be a trailers-only response or
|
||||
// a RST_STREAM before stream initialization, so the stream might
|
||||
// not be established yet.
|
||||
delete(l.estdStreams, c.streamID)
|
||||
str.deleteSelf()
|
||||
}
|
||||
if c.rst { // If RST_STREAM needs to be sent.
|
||||
if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
|
||||
return ErrConnClosing
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
|
||||
if l.side == clientSide {
|
||||
l.draining = true
|
||||
if len(l.estdStreams) == 0 {
|
||||
return ErrConnClosing
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) goAwayHandler(g *goAway) error {
|
||||
// Handling of outgoing GoAway is very specific to side.
|
||||
if l.ssGoAwayHandler != nil {
|
||||
draining, err := l.ssGoAwayHandler(g)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.draining = draining
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) handle(i interface{}) error {
|
||||
switch i := i.(type) {
|
||||
case *incomingWindowUpdate:
|
||||
return l.incomingWindowUpdateHandler(i)
|
||||
case *outgoingWindowUpdate:
|
||||
return l.outgoingWindowUpdateHandler(i)
|
||||
case *incomingSettings:
|
||||
return l.incomingSettingsHandler(i)
|
||||
case *outgoingSettings:
|
||||
return l.outgoingSettingsHandler(i)
|
||||
case *headerFrame:
|
||||
return l.headerHandler(i)
|
||||
case *registerStream:
|
||||
return l.registerStreamHandler(i)
|
||||
case *cleanupStream:
|
||||
return l.cleanupStreamHandler(i)
|
||||
case *incomingGoAway:
|
||||
return l.incomingGoAwayHandler(i)
|
||||
case *dataFrame:
|
||||
return l.preprocessData(i)
|
||||
case *ping:
|
||||
return l.pingHandler(i)
|
||||
case *goAway:
|
||||
return l.goAwayHandler(i)
|
||||
case *outFlowControlSizeRequest:
|
||||
return l.outFlowControlSizeRequestHandler(i)
|
||||
default:
|
||||
return fmt.Errorf("transport: unknown control message type %T", i)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *loopyWriter) applySettings(ss []http2.Setting) error {
|
||||
for _, s := range ss {
|
||||
switch s.ID {
|
||||
case http2.SettingInitialWindowSize:
|
||||
o := l.oiws
|
||||
l.oiws = s.Val
|
||||
if o < l.oiws {
|
||||
// If the new limit is greater, make all depleted streams active.
|
||||
for _, stream := range l.estdStreams {
|
||||
if stream.state == waitingOnStreamQuota {
|
||||
stream.state = active
|
||||
l.activeStreams.enqueue(stream)
|
||||
}
|
||||
}
|
||||
}
|
||||
case http2.SettingHeaderTableSize:
|
||||
updateHeaderTblSize(l.hEnc, s.Val)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// processData removes the first stream from active streams, writes out at most 16KB
|
||||
// of its data and then puts it at the end of activeStreams if there's still more data
|
||||
// to be sent and the stream has some stream-level flow control quota left.
|
||||
func (l *loopyWriter) processData() (bool, error) {
|
||||
if l.sendQuota == 0 {
|
||||
return true, nil
|
||||
}
|
||||
str := l.activeStreams.dequeue() // Remove the first stream.
|
||||
if str == nil {
|
||||
return true, nil
|
||||
}
|
||||
dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item in this stream.
|
||||
// A data item is represented by a dataFrame, since it later translates into
|
||||
// multiple HTTP2 data frames.
|
||||
// Every dataFrame has two buffers; h holds the grpc-message header and d holds the actual data.
|
||||
// As an optimization to keep wire traffic low, data from d is copied into h so that a frame as close as
|
||||
// possible to the maximum HTTP2 frame size can be written out.
|
||||
|
||||
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
|
||||
// Client sends out empty data frame with endStream = true
|
||||
if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
|
||||
return false, err
|
||||
}
|
||||
str.itl.dequeue() // remove the empty data item from stream
|
||||
if str.itl.isEmpty() {
|
||||
str.state = empty
|
||||
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
|
||||
if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
|
||||
return false, err
|
||||
}
|
||||
} else {
|
||||
l.activeStreams.enqueue(str)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
var (
|
||||
idx int
|
||||
buf []byte
|
||||
)
|
||||
if len(dataItem.h) != 0 { // data header has not been written out yet.
|
||||
buf = dataItem.h
|
||||
} else {
|
||||
idx = 1
|
||||
buf = dataItem.d
|
||||
}
|
||||
size := http2MaxFrameLen
|
||||
if len(buf) < size {
|
||||
size = len(buf)
|
||||
}
|
||||
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
|
||||
str.state = waitingOnStreamQuota
|
||||
return false, nil
|
||||
} else if strQuota < size {
|
||||
size = strQuota
|
||||
}
|
||||
|
||||
if l.sendQuota < uint32(size) { // connection-level flow control.
|
||||
size = int(l.sendQuota)
|
||||
}
|
||||
// Now that outgoing flow controls are checked, we can replenish str's write quota.
|
||||
str.wq.replenish(size)
|
||||
var endStream bool
|
||||
// If this is the last data message on this stream and all of it can be written in this iteration.
|
||||
if dataItem.endStream && size == len(buf) {
|
||||
// buf contains either data or it contains header but data is empty.
|
||||
if idx == 1 || len(dataItem.d) == 0 {
|
||||
endStream = true
|
||||
}
|
||||
}
|
||||
if dataItem.onEachWrite != nil {
|
||||
dataItem.onEachWrite()
|
||||
}
|
||||
if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
|
||||
return false, err
|
||||
}
|
||||
buf = buf[size:]
|
||||
str.bytesOutStanding += size
|
||||
l.sendQuota -= uint32(size)
|
||||
if idx == 0 {
|
||||
dataItem.h = buf
|
||||
} else {
|
||||
dataItem.d = buf
|
||||
}
|
||||
|
||||
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
|
||||
str.itl.dequeue()
|
||||
}
|
||||
if str.itl.isEmpty() {
|
||||
str.state = empty
|
||||
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
|
||||
if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
|
||||
return false, err
|
||||
}
|
||||
} else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
|
||||
str.state = waitingOnStreamQuota
|
||||
} else { // Otherwise add it back to the list of active streams.
|
||||
l.activeStreams.enqueue(str)
|
||||
}
|
||||
return false, nil
|
||||
}
|
|
@ -0,0 +1,49 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// The default value of flow control window size in HTTP2 spec.
|
||||
defaultWindowSize = 65535
|
||||
// The initial window size for flow control.
|
||||
initialWindowSize = defaultWindowSize // for an RPC
|
||||
infinity = time.Duration(math.MaxInt64)
|
||||
defaultClientKeepaliveTime = infinity
|
||||
defaultClientKeepaliveTimeout = 20 * time.Second
|
||||
defaultMaxStreamsClient = 100
|
||||
defaultMaxConnectionIdle = infinity
|
||||
defaultMaxConnectionAge = infinity
|
||||
defaultMaxConnectionAgeGrace = infinity
|
||||
defaultServerKeepaliveTime = 2 * time.Hour
|
||||
defaultServerKeepaliveTimeout = 20 * time.Second
|
||||
defaultKeepalivePolicyMinTime = 5 * time.Minute
|
||||
// max window limit set by HTTP2 Specs.
|
||||
maxWindowSize = math.MaxInt32
|
||||
// defaultWriteQuota is the default value for number of data
|
||||
// bytes that each stream can schedule before some of it being
|
||||
// flushed out.
|
||||
defaultWriteQuota = 64 * 1024
|
||||
defaultClientMaxHeaderListSize = uint32(16 << 20)
|
||||
defaultServerMaxHeaderListSize = uint32(16 << 20)
|
||||
)
|
|
@ -22,128 +22,102 @@ import (
|
|||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
const (
|
||||
// The default value of flow control window size in HTTP2 spec.
|
||||
defaultWindowSize = 65535
|
||||
// The initial window size for flow control.
|
||||
initialWindowSize = defaultWindowSize // for an RPC
|
||||
infinity = time.Duration(math.MaxInt64)
|
||||
defaultClientKeepaliveTime = infinity
|
||||
defaultClientKeepaliveTimeout = time.Duration(20 * time.Second)
|
||||
defaultMaxStreamsClient = 100
|
||||
defaultMaxConnectionIdle = infinity
|
||||
defaultMaxConnectionAge = infinity
|
||||
defaultMaxConnectionAgeGrace = infinity
|
||||
defaultServerKeepaliveTime = time.Duration(2 * time.Hour)
|
||||
defaultServerKeepaliveTimeout = time.Duration(20 * time.Second)
|
||||
defaultKeepalivePolicyMinTime = time.Duration(5 * time.Minute)
|
||||
// max window limit set by HTTP2 Specs.
|
||||
maxWindowSize = math.MaxInt32
|
||||
)
|
||||
|
||||
// The following defines various control items which could flow through
|
||||
// the control buffer of transport. They represent different aspects of
|
||||
// control tasks, e.g., flow control, settings, streaming resetting, etc.
|
||||
type windowUpdate struct {
|
||||
streamID uint32
|
||||
increment uint32
|
||||
flush bool
|
||||
// writeQuota is a soft limit on the amount of data a stream can
|
||||
// schedule before some of it is written out.
|
||||
type writeQuota struct {
|
||||
quota int32
|
||||
// get waits on reads from ch when quota goes less than or equal to zero.
|
||||
// replenish writes on it when quota goes positive again.
|
||||
ch chan struct{}
|
||||
// done is triggered in error case.
|
||||
done <-chan struct{}
|
||||
// replenish is called by loopyWriter to give quota back to the stream.
|
||||
// It is implemented as a field so that it can be updated
|
||||
// by tests.
|
||||
replenish func(n int)
|
||||
}
|
||||
|
||||
func (*windowUpdate) item() {}
|
||||
|
||||
type settings struct {
|
||||
ack bool
|
||||
ss []http2.Setting
|
||||
func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
|
||||
w := &writeQuota{
|
||||
quota: sz,
|
||||
ch: make(chan struct{}, 1),
|
||||
done: done,
|
||||
}
|
||||
w.replenish = w.realReplenish
|
||||
return w
|
||||
}
|
||||
|
||||
func (*settings) item() {}
|
||||
|
||||
type resetStream struct {
|
||||
streamID uint32
|
||||
code http2.ErrCode
|
||||
func (w *writeQuota) get(sz int32) error {
|
||||
for {
|
||||
if atomic.LoadInt32(&w.quota) > 0 {
|
||||
atomic.AddInt32(&w.quota, -sz)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*resetStream) item() {}
|
||||
|
||||
type goAway struct {
|
||||
code http2.ErrCode
|
||||
debugData []byte
|
||||
headsUp bool
|
||||
closeConn bool
|
||||
}
|
||||
|
||||
func (*goAway) item() {}
|
||||
|
||||
type flushIO struct {
|
||||
}
|
||||
|
||||
func (*flushIO) item() {}
|
||||
|
||||
type ping struct {
|
||||
ack bool
|
||||
data [8]byte
|
||||
}
|
||||
|
||||
func (*ping) item() {}
|
||||
|
||||
// quotaPool is a pool which accumulates the quota and sends it to acquire()
|
||||
// when it is available.
|
||||
type quotaPool struct {
|
||||
c chan int
|
||||
|
||||
mu sync.Mutex
|
||||
quota int
|
||||
}
|
||||
|
||||
// newQuotaPool creates a quotaPool which has quota q available to consume.
|
||||
func newQuotaPool(q int) *quotaPool {
|
||||
qb := &quotaPool{
|
||||
c: make(chan int, 1),
|
||||
}
|
||||
if q > 0 {
|
||||
qb.c <- q
|
||||
} else {
|
||||
qb.quota = q
|
||||
}
|
||||
return qb
|
||||
}
|
||||
|
||||
// add cancels the pending quota sent on acquired, incremented by v and sends
|
||||
// it back on acquire.
|
||||
func (qb *quotaPool) add(v int) {
|
||||
qb.mu.Lock()
|
||||
defer qb.mu.Unlock()
|
||||
select {
|
||||
case n := <-qb.c:
|
||||
qb.quota += n
|
||||
default:
|
||||
case <-w.ch:
|
||||
continue
|
||||
case <-w.done:
|
||||
return errStreamDone
|
||||
}
|
||||
qb.quota += v
|
||||
if qb.quota <= 0 {
|
||||
return
|
||||
}
|
||||
// After the pool has been created, this is the only place that sends on
|
||||
// the channel. Since mu is held at this point and any quota that was sent
|
||||
// on the channel has been retrieved, we know that this code will always
|
||||
// place any positive quota value on the channel.
|
||||
}
|
||||
|
||||
func (w *writeQuota) realReplenish(n int) {
|
||||
sz := int32(n)
|
||||
a := atomic.AddInt32(&w.quota, sz)
|
||||
b := a - sz
|
||||
if b <= 0 && a > 0 {
|
||||
select {
|
||||
case qb.c <- qb.quota:
|
||||
qb.quota = 0
|
||||
case w.ch <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// acquire returns the channel on which available quota amounts are sent.
|
||||
func (qb *quotaPool) acquire() <-chan int {
|
||||
return qb.c
|
||||
}
|
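A hedged sketch of the intended call pattern for writeQuota: the sender reserves quota before scheduling bytes on the control buffer, and loopy returns it once the bytes have been written. The done channel, the helper name, and the byte counts are assumptions.

func exampleWriteQuota(done <-chan struct{}) error {
	wq := newWriteQuota(defaultWriteQuota, done)

	// Sender side: block until 4KB may be scheduled, then enqueue the data.
	if err := wq.get(4096); err != nil {
		return err // errStreamDone once done fires
	}
	// ... enqueue a dataFrame carrying those 4096 bytes on the control buffer ...

	// Loopy side: after the bytes are written out, return the quota so any
	// sender blocked in get is woken via the ch channel.
	wq.replenish(4096)
	return nil
}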
||||
|
||||
type trInFlow struct {
|
||||
limit uint32
|
||||
unacked uint32
|
||||
effectiveWindowSize uint32
|
||||
}
|
||||
|
||||
func (f *trInFlow) newLimit(n uint32) uint32 {
|
||||
d := n - f.limit
|
||||
f.limit = n
|
||||
f.updateEffectiveWindowSize()
|
||||
return d
|
||||
}
|
||||
|
||||
func (f *trInFlow) onData(n uint32) uint32 {
|
||||
f.unacked += n
|
||||
if f.unacked >= f.limit/4 {
|
||||
w := f.unacked
|
||||
f.unacked = 0
|
||||
f.updateEffectiveWindowSize()
|
||||
return w
|
||||
}
|
||||
f.updateEffectiveWindowSize()
|
||||
return 0
|
||||
}
|
||||
|
||||
func (f *trInFlow) reset() uint32 {
|
||||
w := f.unacked
|
||||
f.unacked = 0
|
||||
f.updateEffectiveWindowSize()
|
||||
return w
|
||||
}
|
||||
|
||||
func (f *trInFlow) updateEffectiveWindowSize() {
|
||||
atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked)
|
||||
}
|
||||
|
||||
func (f *trInFlow) getSize() uint32 {
|
||||
return atomic.LoadUint32(&f.effectiveWindowSize)
|
||||
}
|
||||
|
||||
// TODO(mmukhi): Simplify this code.
|
||||
// inFlow deals with inbound flow control
|
||||
type inFlow struct {
|
||||
mu sync.Mutex
|
||||
|
@ -164,9 +138,9 @@ type inFlow struct {
|
|||
// It assumes that n is always greater than the old limit.
|
||||
func (f *inFlow) newLimit(n uint32) uint32 {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
d := n - f.limit
|
||||
f.limit = n
|
||||
f.mu.Unlock()
|
||||
return d
|
||||
}
|
||||
|
||||
|
@ -175,7 +149,6 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
|
|||
n = uint32(math.MaxInt32)
|
||||
}
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
// estSenderQuota is the receiver's view of the maximum number of bytes the sender
|
||||
// can send without a window update.
|
||||
estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
|
||||
|
@ -187,7 +160,7 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
|
|||
// for this message. Therefore we must send an update over the limit since there's an active read
|
||||
// request from the application.
|
||||
if estUntransmittedData > estSenderQuota {
|
||||
// Sender's window shouldn't go more than 2^31 - 1 as speecified in the HTTP spec.
|
||||
// Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec.
|
||||
if f.limit+n > maxWindowSize {
|
||||
f.delta = maxWindowSize - f.limit
|
||||
} else {
|
||||
|
@ -196,19 +169,24 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
|
|||
// is padded; We will fallback on the current available window(at least a 1/4th of the limit).
|
||||
f.delta = n
|
||||
}
|
||||
f.mu.Unlock()
|
||||
return f.delta
|
||||
}
|
||||
f.mu.Unlock()
|
||||
return 0
|
||||
}
|
||||
|
||||
// onData is invoked when some data frame is received. It updates pendingData.
|
||||
func (f *inFlow) onData(n uint32) error {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
f.pendingData += n
|
||||
if f.pendingData+f.pendingUpdate > f.limit+f.delta {
|
||||
return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit)
|
||||
limit := f.limit
|
||||
rcvd := f.pendingData + f.pendingUpdate
|
||||
f.mu.Unlock()
|
||||
return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit)
|
||||
}
|
||||
f.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -216,8 +194,8 @@ func (f *inFlow) onData(n uint32) error {
|
|||
// to be sent to the peer.
|
||||
func (f *inFlow) onRead(n uint32) uint32 {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
if f.pendingData == 0 {
|
||||
f.mu.Unlock()
|
||||
return 0
|
||||
}
|
||||
f.pendingData -= n
|
||||
|
@ -232,15 +210,9 @@ func (f *inFlow) onRead(n uint32) uint32 {
|
|||
if f.pendingUpdate >= f.limit/4 {
|
||||
wu := f.pendingUpdate
|
||||
f.pendingUpdate = 0
|
||||
f.mu.Unlock()
|
||||
return wu
|
||||
}
|
||||
f.mu.Unlock()
|
||||
return 0
|
||||
}
|
||||
|
||||
func (f *inFlow) resetPendingUpdate() uint32 {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
n := f.pendingUpdate
|
||||
f.pendingUpdate = 0
|
||||
return n
|
||||
}
|
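A brief worked example of the 1/4-of-limit threshold used by onRead above (numbers chosen purely for illustration): with limit = 65535, window updates are withheld until pendingUpdate reaches limit/4 = 16383 bytes, so after three 5000-byte reads nothing is sent, while the fourth read pushes pendingUpdate to 20000 >= 16383, returns 20000 as the window update to send, and resets pendingUpdate to zero.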
|
@ -22,8 +22,10 @@ package transport
|
|||
|
||||
import (
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
@ -33,13 +35,18 @@ func dialContext(ctx context.Context, network, address string) (net.Conn, error)
|
|||
return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
|
||||
}
|
||||
|
||||
// ContextErr converts the error from context package into a StreamError.
|
||||
func ContextErr(err error) StreamError {
|
||||
// ContextErr converts the error from context package into a status error.
|
||||
func ContextErr(err error) error {
|
||||
switch err {
|
||||
case context.DeadlineExceeded:
|
||||
return streamErrorf(codes.DeadlineExceeded, "%v", err)
|
||||
return status.Error(codes.DeadlineExceeded, err.Error())
|
||||
case context.Canceled:
|
||||
return streamErrorf(codes.Canceled, "%v", err)
|
||||
return status.Error(codes.Canceled, err.Error())
|
||||
}
|
||||
return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
|
||||
return status.Errorf(codes.Internal, "Unexpected error from context package: %v", err)
|
||||
}
|
||||
|
||||
// contextFromRequest returns a background context.
|
||||
func contextFromRequest(r *http.Request) context.Context {
|
||||
return context.Background()
|
||||
}
|
|
@ -23,8 +23,10 @@ package transport
|
|||
import (
|
||||
"context"
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
netctx "golang.org/x/net/context"
|
||||
)
|
||||
|
@ -34,13 +36,18 @@ func dialContext(ctx context.Context, network, address string) (net.Conn, error)
|
|||
return (&net.Dialer{}).DialContext(ctx, network, address)
|
||||
}
|
||||
|
||||
// ContextErr converts the error from context package into a StreamError.
|
||||
func ContextErr(err error) StreamError {
|
||||
// ContextErr converts the error from context package into a status error.
|
||||
func ContextErr(err error) error {
|
||||
switch err {
|
||||
case context.DeadlineExceeded, netctx.DeadlineExceeded:
|
||||
return streamErrorf(codes.DeadlineExceeded, "%v", err)
|
||||
return status.Error(codes.DeadlineExceeded, err.Error())
|
||||
case context.Canceled, netctx.Canceled:
|
||||
return streamErrorf(codes.Canceled, "%v", err)
|
||||
return status.Error(codes.Canceled, err.Error())
|
||||
}
|
||||
return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
|
||||
return status.Errorf(codes.Internal, "Unexpected error from context package: %v", err)
|
||||
}
|
||||
|
||||
// contextFromRequest returns a context from the HTTP Request.
|
||||
func contextFromRequest(r *http.Request) context.Context {
|
||||
return r.Context()
|
||||
}
|
|
@ -33,26 +33,31 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/http2"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/stats"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// NewServerHandlerTransport returns a ServerTransport handling gRPC
|
||||
// from inside an http.Handler. It requires that the http Server
|
||||
// supports HTTP/2.
|
||||
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) {
|
||||
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) {
|
||||
if r.ProtoMajor != 2 {
|
||||
return nil, errors.New("gRPC requires HTTP/2")
|
||||
}
|
||||
if r.Method != "POST" {
|
||||
return nil, errors.New("invalid gRPC request method")
|
||||
}
|
||||
if !validContentType(r.Header.Get("Content-Type")) {
|
||||
contentType := r.Header.Get("Content-Type")
|
||||
// TODO: do we assume contentType is lowercase? we did before
|
||||
contentSubtype, validContentType := contentSubtype(contentType)
|
||||
if !validContentType {
|
||||
return nil, errors.New("invalid gRPC request content-type")
|
||||
}
|
||||
if _, ok := w.(http.Flusher); !ok {
|
||||
|
@ -67,30 +72,33 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr
|
|||
req: r,
|
||||
closedCh: make(chan struct{}),
|
||||
writes: make(chan func()),
|
||||
contentType: contentType,
|
||||
contentSubtype: contentSubtype,
|
||||
stats: stats,
|
||||
}
|
||||
|
||||
if v := r.Header.Get("grpc-timeout"); v != "" {
|
||||
to, err := decodeTimeout(v)
|
||||
if err != nil {
|
||||
return nil, streamErrorf(codes.Internal, "malformed time-out: %v", err)
|
||||
return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err)
|
||||
}
|
||||
st.timeoutSet = true
|
||||
st.timeout = to
|
||||
}
|
||||
|
||||
var metakv []string
|
||||
metakv := []string{"content-type", contentType}
|
||||
if r.Host != "" {
|
||||
metakv = append(metakv, ":authority", r.Host)
|
||||
}
|
||||
for k, vv := range r.Header {
|
||||
k = strings.ToLower(k)
|
||||
if isReservedHeader(k) && !isWhitelistedPseudoHeader(k) {
|
||||
if isReservedHeader(k) && !isWhitelistedHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
v, err := decodeMetadataHeader(k, v)
|
||||
if err != nil {
|
||||
return nil, streamErrorf(codes.InvalidArgument, "malformed binary metadata: %v", err)
|
||||
return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err)
|
||||
}
|
||||
metakv = append(metakv, k, v)
|
||||
}
|
||||
|
@ -121,6 +129,18 @@ type serverHandlerTransport struct {
|
|||
// ServeHTTP (HandleStreams) goroutine. The channel is closed
|
||||
// when WriteStatus is called.
|
||||
writes chan func()
|
||||
|
||||
// block concurrent WriteStatus calls
|
||||
// e.g. grpc/(*serverStream).SendMsg/RecvMsg
|
||||
writeStatusMu sync.Mutex
|
||||
|
||||
// we just mirror the request content-type
|
||||
contentType string
|
||||
// we store both contentType and contentSubtype so we don't keep recreating them
|
||||
// TODO make sure this is consistent across handler_server and http2_server
|
||||
contentSubtype string
|
||||
|
||||
stats stats.Handler
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) Close() error {
|
||||
|
@ -167,11 +187,13 @@ func (ht *serverHandlerTransport) do(fn func()) error {
|
|||
case <-ht.closedCh:
|
||||
return ErrConnClosing
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
|
||||
ht.writeStatusMu.Lock()
|
||||
defer ht.writeStatusMu.Unlock()
|
||||
|
||||
err := ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
|
||||
|
@ -186,7 +208,15 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
|
|||
h.Set("Grpc-Message", encodeGrpcMessage(m))
|
||||
}
|
||||
|
||||
// TODO: Support Grpc-Status-Details-Bin
|
||||
if p := st.Proto(); p != nil && len(p.Details) > 0 {
|
||||
stBytes, err := proto.Marshal(p)
|
||||
if err != nil {
|
||||
// TODO: return error instead, when callers are able to handle it.
|
||||
panic(err)
|
||||
}
|
||||
|
||||
h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes))
|
||||
}
|
||||
|
||||
if md := s.Trailer(); len(md) > 0 {
|
||||
for k, vv := range md {
|
||||
|
@ -202,7 +232,14 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
|
|||
}
|
||||
}
|
||||
})
|
||||
|
||||
if err == nil { // transport has not been closed
|
||||
if ht.stats != nil {
|
||||
ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
|
||||
}
|
||||
ht.Close()
|
||||
close(ht.writes)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -216,7 +253,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
|
|||
|
||||
h := ht.rw.Header()
|
||||
h["Date"] = nil // suppress Date to make tests happy; TODO: restore
|
||||
h.Set("Content-Type", "application/grpc")
|
||||
h.Set("Content-Type", ht.contentType)
|
||||
|
||||
// Predeclare trailers we'll set later in WriteStatus (after the body).
|
||||
// This is a SHOULD in the HTTP RFC, and the way you add (known)
|
||||
|
@ -225,25 +262,24 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
|
|||
// and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
|
||||
h.Add("Trailer", "Grpc-Status")
|
||||
h.Add("Trailer", "Grpc-Message")
|
||||
// TODO: Support Grpc-Status-Details-Bin
|
||||
h.Add("Trailer", "Grpc-Status-Details-Bin")
|
||||
|
||||
if s.sendCompress != "" {
|
||||
h.Set("Grpc-Encoding", s.sendCompress)
|
||||
}
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error {
|
||||
func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
|
||||
return ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
ht.rw.Write(hdr)
|
||||
ht.rw.Write(data)
|
||||
if !opts.Delay {
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
return ht.do(func() {
|
||||
err := ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
h := ht.rw.Header()
|
||||
for k, vv := range md {
|
||||
|
@ -259,17 +295,24 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
|
|||
ht.rw.WriteHeader(200)
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
})
|
||||
|
||||
if err == nil {
|
||||
if ht.stats != nil {
|
||||
ht.stats.HandleRPC(s.Context(), &stats.OutHeader{})
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
|
||||
// With this transport type there will be exactly 1 stream: this HTTP request.
|
||||
|
||||
var ctx context.Context
|
||||
ctx := contextFromRequest(ht.req)
|
||||
var cancel context.CancelFunc
|
||||
if ht.timeoutSet {
|
||||
ctx, cancel = context.WithTimeout(context.Background(), ht.timeout)
|
||||
ctx, cancel = context.WithTimeout(ctx, ht.timeout)
|
||||
} else {
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
ctx, cancel = context.WithCancel(ctx)
|
||||
}
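For illustration (not part of the vendored code): the change above derives the stream context from the request context instead of context.Background, so client disconnects now propagate as cancellation. A standalone sketch using only the standard library; streamContext is a hypothetical helper name:

package main

import (
    "context"
    "fmt"
    "time"
)

// streamContext mirrors the logic above: the stream context inherits from the
// request context, optionally with the decoded grpc-timeout applied.
func streamContext(req context.Context, timeoutSet bool, timeout time.Duration) (context.Context, context.CancelFunc) {
    if timeoutSet {
        return context.WithTimeout(req, timeout)
    }
    return context.WithCancel(req)
}

func main() {
    reqCtx, cancelReq := context.WithCancel(context.Background())
    ctx, cancel := streamContext(reqCtx, false, 0)
    defer cancel()
    cancelReq() // e.g. the underlying HTTP/2 request is aborted
    <-ctx.Done()
    fmt.Println(ctx.Err()) // context canceled
}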
|
||||
|
||||
// requestOver is closed when either the request's context is done
|
||||
|
@ -300,6 +343,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
|
|||
st: ht,
|
||||
method: req.URL.Path,
|
||||
recvCompress: req.Header.Get("grpc-encoding"),
|
||||
contentSubtype: ht.contentSubtype,
|
||||
}
|
||||
pr := &peer.Peer{
|
||||
Addr: ht.RemoteAddr(),
|
||||
|
@ -308,10 +352,18 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
|
|||
pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
|
||||
}
|
||||
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
|
||||
ctx = peer.NewContext(ctx, pr)
|
||||
s.ctx = newContextWithStream(ctx, s)
|
||||
s.ctx = peer.NewContext(ctx, pr)
|
||||
if ht.stats != nil {
|
||||
s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||
inHeader := &stats.InHeader{
|
||||
FullMethod: s.method,
|
||||
RemoteAddr: ht.RemoteAddr(),
|
||||
Compression: s.recvCompress,
|
||||
}
|
||||
ht.stats.HandleRPC(s.ctx, inHeader)
|
||||
}
|
||||
s.trReader = &transportReader{
|
||||
reader: &recvBufferReader{ctx: s.ctx, recv: s.buf},
|
||||
reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
|
||||
windowHandler: func(int) {},
|
||||
}
|
||||
|
||||
|
@ -366,6 +418,10 @@ func (ht *serverHandlerTransport) runStream() {
|
|||
}
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) IncrMsgSent() {}
|
||||
|
||||
func (ht *serverHandlerTransport) IncrMsgRecv() {}
|
||||
|
||||
func (ht *serverHandlerTransport) Drain() {
|
||||
panic("Drain() is not implemented")
|
||||
}
|
||||
|
@ -376,17 +432,14 @@ func (ht *serverHandlerTransport) Drain() {
|
|||
// * io.EOF
|
||||
// * io.ErrUnexpectedEOF
|
||||
// * of type transport.ConnectionError
|
||||
// * of type transport.StreamError
|
||||
// * an error from the status package
|
||||
func mapRecvMsgError(err error) error {
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
return err
|
||||
}
|
||||
if se, ok := err.(http2.StreamError); ok {
|
||||
if code, ok := http2ErrConvTab[se.Code]; ok {
|
||||
return StreamError{
|
||||
Code: code,
|
||||
Desc: se.Error(),
|
||||
}
|
||||
return status.Error(code, se.Error())
|
||||
}
|
||||
}
|
||||
return connectionErrorf(true, err, err.Error())
|
1360 vendor/google.golang.org/grpc/internal/transport/http2_client.go (generated, vendored, new file)
File diff suppressed because it is too large
|
@ -28,8 +28,8 @@ import (
|
|||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/http2"
|
||||
|
@ -44,8 +44,12 @@ const (
|
|||
http2MaxFrameLen = 16384 // 16KB frame
|
||||
// http://http2.github.io/http2-spec/#SettingValues
|
||||
http2InitHeaderTableSize = 4096
|
||||
// http2IOBufSize specifies the buffer size for sending frames.
|
||||
http2IOBufSize = 32 * 1024
|
||||
// baseContentType is the base content-type for gRPC. This is a valid
|
||||
// content-type on its own, but can also include a content-subtype such as
|
||||
// "proto" as a suffix after "+" or ";". See
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
|
||||
// for more details.
|
||||
baseContentType = "application/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -64,7 +68,7 @@ var (
|
|||
http2.ErrCodeConnect: codes.Internal,
|
||||
http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
|
||||
http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
|
||||
http2.ErrCodeHTTP11Required: codes.FailedPrecondition,
|
||||
http2.ErrCodeHTTP11Required: codes.Internal,
|
||||
}
|
||||
statusCodeConvTab = map[codes.Code]http2.ErrCode{
|
||||
codes.Internal: http2.ErrCodeInternal,
|
||||
|
@ -112,6 +116,11 @@ type decodeState struct {
|
|||
method string
|
||||
// key-value metadata map from the peer.
|
||||
mdata map[string][]string
|
||||
statsTags []byte
|
||||
statsTrace []byte
|
||||
contentSubtype string
|
||||
// whether decoding on server side or not
|
||||
serverSide bool
|
||||
}
|
||||
|
||||
// isReservedHeader checks whether hdr belongs to HTTP2 headers
|
||||
|
@ -123,12 +132,16 @@ func isReservedHeader(hdr string) bool {
|
|||
}
|
||||
switch hdr {
|
||||
case "content-type",
|
||||
"user-agent",
|
||||
"grpc-message-type",
|
||||
"grpc-encoding",
|
||||
"grpc-message",
|
||||
"grpc-status",
|
||||
"grpc-timeout",
|
||||
"grpc-status-details-bin",
|
||||
// Intentionally exclude grpc-previous-rpc-attempts and
|
||||
// grpc-retry-pushback-ms, which are "reserved", but their API
|
||||
// intentionally works via metadata.
|
||||
"te":
|
||||
return true
|
||||
default:
|
||||
|
@ -136,28 +149,55 @@ func isReservedHeader(hdr string) bool {
|
|||
}
|
||||
}
|
||||
|
||||
// isWhitelistedPseudoHeader checks whether hdr belongs to HTTP2 pseudoheaders
|
||||
// that should be propagated into metadata visible to users.
|
||||
func isWhitelistedPseudoHeader(hdr string) bool {
|
||||
// isWhitelistedHeader checks whether hdr should be propagated into metadata
|
||||
// visible to users, even though it is classified as "reserved", above.
|
||||
func isWhitelistedHeader(hdr string) bool {
|
||||
switch hdr {
|
||||
case ":authority":
|
||||
case ":authority", "user-agent":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func validContentType(t string) bool {
|
||||
e := "application/grpc"
|
||||
if !strings.HasPrefix(t, e) {
|
||||
return false
|
||||
// contentSubtype returns the content-subtype for the given content-type. The
|
||||
// given content-type must be a valid content-type that starts with
|
||||
// "application/grpc". A content-subtype will follow "application/grpc" after a
|
||||
// "+" or ";". See
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
|
||||
// more details.
|
||||
//
|
||||
// If contentType is not a valid content-type for gRPC, the boolean
|
||||
// will be false, otherwise true. If content-type == "application/grpc",
|
||||
// "application/grpc+", or "application/grpc;", the boolean will be true,
|
||||
// but no content-subtype will be returned.
|
||||
//
|
||||
// contentType is assumed to be lowercase already.
|
||||
func contentSubtype(contentType string) (string, bool) {
|
||||
if contentType == baseContentType {
|
||||
return "", true
|
||||
}
|
||||
// Support variations on the content-type
|
||||
// (e.g. "application/grpc+blah", "application/grpc;blah").
|
||||
if len(t) > len(e) && t[len(e)] != '+' && t[len(e)] != ';' {
|
||||
return false
|
||||
if !strings.HasPrefix(contentType, baseContentType) {
|
||||
return "", false
|
||||
}
|
||||
return true
|
||||
// guaranteed since != baseContentType and has baseContentType prefix
|
||||
switch contentType[len(baseContentType)] {
|
||||
case '+', ';':
|
||||
// this will return true for "application/grpc+" or "application/grpc;"
|
||||
// which the previous validContentType function tested to be valid, so we
|
||||
// just say that no content-subtype is specified in this case
|
||||
return contentType[len(baseContentType)+1:], true
|
||||
default:
|
||||
return "", false
|
||||
}
|
||||
}
|
||||
|
||||
// contentSubtype is assumed to be lowercase
|
||||
func contentType(contentSubtype string) string {
|
||||
if contentSubtype == "" {
|
||||
return baseContentType
|
||||
}
|
||||
return baseContentType + "+" + contentSubtype
|
||||
}
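For illustration (not part of the vendored code): a self-contained sketch of the content-type parsing introduced above, assuming the same "application/grpc" base and '+'/';' separators:

package main

import (
    "fmt"
    "strings"
)

const base = "application/grpc"

// subtype is a standalone re-statement of the contentSubtype parsing above.
func subtype(ct string) (string, bool) {
    if ct == base {
        return "", true
    }
    if !strings.HasPrefix(ct, base) {
        return "", false
    }
    switch ct[len(base)] {
    case '+', ';':
        return ct[len(base)+1:], true
    }
    return "", false
}

func main() {
    fmt.Println(subtype("application/grpc+proto")) // proto true
    fmt.Println(subtype("application/grpc"))       // "" true (no subtype)
    fmt.Println(subtype("application/json"))       // "" false
}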
|
||||
|
||||
func (d *decodeState) status() *status.Status {
|
||||
|
@ -197,13 +237,22 @@ func decodeMetadataHeader(k, v string) (string, error) {
|
|||
return v, nil
|
||||
}
|
||||
|
||||
func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error {
|
||||
func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
|
||||
// frame.Truncated is set to true when framer detects that the current header
|
||||
// list size hits MaxHeaderListSize limit.
|
||||
if frame.Truncated {
|
||||
return status.Error(codes.Internal, "peer header list size exceeded limit")
|
||||
}
|
||||
for _, hf := range frame.Fields {
|
||||
if err := d.processHeaderField(hf); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if d.serverSide {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If grpc status exists, no need to check further.
|
||||
if d.rawStatusCode != nil || d.statusGen != nil {
|
||||
return nil
|
||||
|
@ -212,7 +261,7 @@ func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error
|
|||
// If grpc status doesn't exist and http status doesn't exist,
|
||||
// then it's a malformed header.
|
||||
if d.httpStatus == nil {
|
||||
return streamErrorf(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)")
|
||||
return status.Error(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)")
|
||||
}
|
||||
|
||||
if *(d.httpStatus) != http.StatusOK {
|
||||
|
@ -220,33 +269,46 @@ func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error
|
|||
if !ok {
|
||||
code = codes.Unknown
|
||||
}
|
||||
return streamErrorf(code, http.StatusText(*(d.httpStatus)))
|
||||
return status.Error(code, http.StatusText(*(d.httpStatus)))
|
||||
}
|
||||
|
||||
// gRPC status doesn't exist and http status is OK.
|
||||
// Set rawStatusCode to be unknown and return nil error.
|
||||
// So that, if the stream has ended this Unknown status
|
||||
// will be propogated to the user.
|
||||
// will be propagated to the user.
|
||||
// Otherwise, it will be ignored. In which case, status from
|
||||
// a later trailer, that has StreamEnded flag set, is propogated.
|
||||
// a later trailer, that has StreamEnded flag set, is propagated.
|
||||
code := int(codes.Unknown)
|
||||
d.rawStatusCode = &code
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *decodeState) addMetadata(k, v string) {
|
||||
if d.mdata == nil {
|
||||
d.mdata = make(map[string][]string)
|
||||
}
|
||||
d.mdata[k] = append(d.mdata[k], v)
|
||||
}
|
||||
|
||||
func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
|
||||
switch f.Name {
|
||||
case "content-type":
|
||||
if !validContentType(f.Value) {
|
||||
return streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)
|
||||
contentSubtype, validContentType := contentSubtype(f.Value)
|
||||
if !validContentType {
|
||||
return status.Errorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value)
|
||||
}
|
||||
d.contentSubtype = contentSubtype
|
||||
// TODO: do we want to propagate the whole content-type in the metadata,
|
||||
// or come up with a way to just propagate the content-subtype if it was set?
|
||||
// ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
|
||||
// in the metadata?
|
||||
d.addMetadata(f.Name, f.Value)
|
||||
case "grpc-encoding":
|
||||
d.encoding = f.Value
|
||||
case "grpc-status":
|
||||
code, err := strconv.Atoi(f.Value)
|
||||
if err != nil {
|
||||
return streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)
|
||||
return status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err)
|
||||
}
|
||||
d.rawStatusCode = &code
|
||||
case "grpc-message":
|
||||
|
@ -254,39 +316,51 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
|
|||
case "grpc-status-details-bin":
|
||||
v, err := decodeBinHeader(f.Value)
|
||||
if err != nil {
|
||||
return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
|
||||
return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
|
||||
}
|
||||
s := &spb.Status{}
|
||||
if err := proto.Unmarshal(v, s); err != nil {
|
||||
return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
|
||||
return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
|
||||
}
|
||||
d.statusGen = status.FromProto(s)
|
||||
case "grpc-timeout":
|
||||
d.timeoutSet = true
|
||||
var err error
|
||||
if d.timeout, err = decodeTimeout(f.Value); err != nil {
|
||||
return streamErrorf(codes.Internal, "transport: malformed time-out: %v", err)
|
||||
return status.Errorf(codes.Internal, "transport: malformed time-out: %v", err)
|
||||
}
|
||||
case ":path":
|
||||
d.method = f.Value
|
||||
case ":status":
|
||||
code, err := strconv.Atoi(f.Value)
|
||||
if err != nil {
|
||||
return streamErrorf(codes.Internal, "transport: malformed http-status: %v", err)
|
||||
return status.Errorf(codes.Internal, "transport: malformed http-status: %v", err)
|
||||
}
|
||||
d.httpStatus = &code
|
||||
case "grpc-tags-bin":
|
||||
v, err := decodeBinHeader(f.Value)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
|
||||
}
|
||||
d.statsTags = v
|
||||
d.addMetadata(f.Name, string(v))
|
||||
case "grpc-trace-bin":
|
||||
v, err := decodeBinHeader(f.Value)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
|
||||
}
|
||||
d.statsTrace = v
|
||||
d.addMetadata(f.Name, string(v))
|
||||
default:
|
||||
if !isReservedHeader(f.Name) || isWhitelistedPseudoHeader(f.Name) {
|
||||
if d.mdata == nil {
|
||||
d.mdata = make(map[string][]string)
|
||||
if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
|
||||
break
|
||||
}
|
||||
v, err := decodeMetadataHeader(f.Name, f.Value)
|
||||
if err != nil {
|
||||
errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
|
||||
return nil
|
||||
}
|
||||
d.mdata[f.Name] = append(d.mdata[f.Name], v)
|
||||
}
|
||||
d.addMetadata(f.Name, v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -375,16 +449,17 @@ func decodeTimeout(s string) (time.Duration, error) {
|
|||
|
||||
const (
|
||||
spaceByte = ' '
|
||||
tildaByte = '~'
|
||||
tildeByte = '~'
|
||||
percentByte = '%'
|
||||
)
|
||||
|
||||
// encodeGrpcMessage is used to encode status code in header field
|
||||
// "grpc-message".
|
||||
// It checks to see if each individual byte in msg is an
|
||||
// allowable byte, and then either percent encoding or passing it through.
|
||||
// When percent encoding, the byte is converted into hexadecimal notation
|
||||
// with a '%' prepended.
|
||||
// "grpc-message". It does percent encoding and also replaces invalid utf-8
|
||||
// characters with Unicode replacement character.
|
||||
//
|
||||
// It checks to see if each individual byte in msg is an allowable byte, and
|
||||
// then either percent encoding or passing it through. When percent encoding,
|
||||
// the byte is converted into hexadecimal notation with a '%' prepended.
|
||||
func encodeGrpcMessage(msg string) string {
|
||||
if msg == "" {
|
||||
return ""
|
||||
|
@ -392,7 +467,7 @@ func encodeGrpcMessage(msg string) string {
|
|||
lenMsg := len(msg)
|
||||
for i := 0; i < lenMsg; i++ {
|
||||
c := msg[i]
|
||||
if !(c >= spaceByte && c < tildaByte && c != percentByte) {
|
||||
if !(c >= spaceByte && c <= tildeByte && c != percentByte) {
|
||||
return encodeGrpcMessageUnchecked(msg)
|
||||
}
|
||||
}
|
||||
|
@ -401,14 +476,26 @@ func encodeGrpcMessage(msg string) string {
|
|||
|
||||
func encodeGrpcMessageUnchecked(msg string) string {
|
||||
var buf bytes.Buffer
|
||||
lenMsg := len(msg)
|
||||
for i := 0; i < lenMsg; i++ {
|
||||
c := msg[i]
|
||||
if c >= spaceByte && c < tildaByte && c != percentByte {
|
||||
buf.WriteByte(c)
|
||||
} else {
|
||||
buf.WriteString(fmt.Sprintf("%%%02X", c))
|
||||
for len(msg) > 0 {
|
||||
r, size := utf8.DecodeRuneInString(msg)
|
||||
for _, b := range []byte(string(r)) {
|
||||
if size > 1 {
|
||||
// If size > 1, r is not ascii. Always do percent encoding.
|
||||
buf.WriteString(fmt.Sprintf("%%%02X", b))
|
||||
continue
|
||||
}
|
||||
|
||||
// The for loop is necessary even if size == 1. r could be
|
||||
// utf8.RuneError.
|
||||
//
|
||||
// fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
|
||||
if b >= spaceByte && b <= tildeByte && b != percentByte {
|
||||
buf.WriteByte(b)
|
||||
} else {
|
||||
buf.WriteString(fmt.Sprintf("%%%02X", b))
|
||||
}
|
||||
}
|
||||
msg = msg[size:]
|
||||
}
|
||||
return buf.String()
|
||||
}
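For illustration (not part of the vendored code): a rough, standalone sketch of the percent-encoding rule used by encodeGrpcMessage above — printable ASCII other than '%' passes through, while every other byte (including each byte of a multi-byte or invalid rune) is hex-escaped:

package main

import (
    "fmt"
    "unicode/utf8"
)

// encode sketches the grpc-message rule: bytes in the printable ASCII range
// other than '%' pass through; everything else is percent-encoded.
func encode(msg string) string {
    var out []byte
    for len(msg) > 0 {
        r, size := utf8.DecodeRuneInString(msg)
        for _, b := range []byte(string(r)) {
            if size == 1 && b >= ' ' && b <= '~' && b != '%' {
                out = append(out, b)
            } else {
                out = append(out, []byte(fmt.Sprintf("%%%02X", b))...)
            }
        }
        msg = msg[size:]
    }
    return string(out)
}

func main() {
    fmt.Println(encode("déjà vu 100%")) // non-ASCII bytes and '%' are escaped
}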
|
||||
|
@ -447,151 +534,80 @@ func decodeGrpcMessageUnchecked(msg string) string {
|
|||
return buf.String()
|
||||
}
|
||||
|
||||
type bufWriter struct {
|
||||
buf []byte
|
||||
offset int
|
||||
batchSize int
|
||||
conn net.Conn
|
||||
err error
|
||||
|
||||
onFlush func()
|
||||
}
|
||||
|
||||
func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
|
||||
return &bufWriter{
|
||||
buf: make([]byte, batchSize*2),
|
||||
batchSize: batchSize,
|
||||
conn: conn,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *bufWriter) Write(b []byte) (n int, err error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
if w.batchSize == 0 { // Buffer has been disabled.
|
||||
return w.conn.Write(b)
|
||||
}
|
||||
for len(b) > 0 {
|
||||
nn := copy(w.buf[w.offset:], b)
|
||||
b = b[nn:]
|
||||
w.offset += nn
|
||||
n += nn
|
||||
if w.offset >= w.batchSize {
|
||||
err = w.Flush()
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (w *bufWriter) Flush() error {
|
||||
if w.err != nil {
|
||||
return w.err
|
||||
}
|
||||
if w.offset == 0 {
|
||||
return nil
|
||||
}
|
||||
if w.onFlush != nil {
|
||||
w.onFlush()
|
||||
}
|
||||
_, w.err = w.conn.Write(w.buf[:w.offset])
|
||||
w.offset = 0
|
||||
return w.err
|
||||
}
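For illustration (not part of the vendored code): the new bufWriter batches writes until batchSize bytes accumulate and only then hits the connection. miniBufWriter below is a hypothetical, simplified stand-in over a bytes.Buffer:

package main

import (
    "bytes"
    "fmt"
)

// miniBufWriter is a simplified stand-in for the bufWriter above: bytes
// accumulate in buf and are flushed to the underlying writer once batchSize
// is reached (error handling omitted for brevity).
type miniBufWriter struct {
    buf       []byte
    offset    int
    batchSize int
    out       *bytes.Buffer
}

func (w *miniBufWriter) Write(b []byte) (int, error) {
    n := 0
    for len(b) > 0 {
        nn := copy(w.buf[w.offset:], b)
        b = b[nn:]
        w.offset += nn
        n += nn
        if w.offset >= w.batchSize {
            w.Flush()
        }
    }
    return n, nil
}

func (w *miniBufWriter) Flush() {
    w.out.Write(w.buf[:w.offset])
    w.offset = 0
}

func main() {
    out := &bytes.Buffer{}
    w := &miniBufWriter{buf: make([]byte, 8), batchSize: 4, out: out}
    w.Write([]byte("hi"))
    fmt.Printf("%q\n", out.String()) // "" - still buffered
    w.Write([]byte("there"))
    fmt.Printf("%q\n", out.String()) // "hithere" - batch threshold crossed
}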
|
||||
|
||||
type framer struct {
|
||||
numWriters int32
|
||||
reader io.Reader
|
||||
writer *bufio.Writer
|
||||
writer *bufWriter
|
||||
fr *http2.Framer
|
||||
}
|
||||
|
||||
func newFramer(conn net.Conn) *framer {
|
||||
f := &framer{
|
||||
reader: bufio.NewReaderSize(conn, http2IOBufSize),
|
||||
writer: bufio.NewWriterSize(conn, http2IOBufSize),
|
||||
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer {
|
||||
if writeBufferSize < 0 {
|
||||
writeBufferSize = 0
|
||||
}
|
||||
var r io.Reader = conn
|
||||
if readBufferSize > 0 {
|
||||
r = bufio.NewReaderSize(r, readBufferSize)
|
||||
}
|
||||
w := newBufWriter(conn, writeBufferSize)
|
||||
f := &framer{
|
||||
writer: w,
|
||||
fr: http2.NewFramer(w, r),
|
||||
}
|
||||
f.fr = http2.NewFramer(f.writer, f.reader)
|
||||
// Opt-in to Frame reuse API on framer to reduce garbage.
|
||||
// Frames aren't safe to read from after a subsequent call to ReadFrame.
|
||||
f.fr.SetReuseFrames()
|
||||
f.fr.MaxHeaderListSize = maxHeaderListSize
|
||||
f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
|
||||
return f
|
||||
}
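For illustration (not part of the vendored code): newFramer above now takes configurable read/write buffer sizes and a max header list size. A hedged sketch of wiring an http2.Framer to a buffered reader, with a bytes.Buffer standing in for the net.Conn (assumes golang.org/x/net/http2 and hpack as imported by this file):

package main

import (
    "bufio"
    "bytes"
    "fmt"

    "golang.org/x/net/http2"
    "golang.org/x/net/http2/hpack"
)

func main() {
    var conn bytes.Buffer // stands in for the net.Conn
    r := bufio.NewReaderSize(&conn, 16*1024)
    fr := http2.NewFramer(&conn, r) // writes go straight to conn in this sketch
    fr.ReadMetaHeaders = hpack.NewDecoder(4096, nil)
    fr.SetReuseFrames()
    if err := fr.WriteSettings(http2.Setting{ID: http2.SettingMaxFrameSize, Val: 16384}); err != nil {
        fmt.Println("write settings:", err)
        return
    }
    fmt.Println("settings frame bytes on the wire:", conn.Len())
}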
|
||||
|
||||
func (f *framer) adjustNumWriters(i int32) int32 {
|
||||
return atomic.AddInt32(&f.numWriters, i)
|
||||
}
|
||||
|
||||
// The following writeXXX functions can only be called when the caller gets
|
||||
// unblocked from writableChan channel (i.e., owns the privilege to write).
|
||||
|
||||
func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
|
||||
if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error {
|
||||
if err := f.fr.WriteData(streamID, endStream, data); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error {
|
||||
if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error {
|
||||
if err := f.fr.WriteHeaders(p); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error {
|
||||
if err := f.fr.WritePing(ack, data); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error {
|
||||
if err := f.fr.WritePriority(streamID, p); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error {
|
||||
if err := f.fr.WritePushPromise(p); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error {
|
||||
if err := f.fr.WriteRSTStream(streamID, code); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) writeSettings(forceFlush bool, settings ...http2.Setting) error {
|
||||
if err := f.fr.WriteSettings(settings...); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) writeSettingsAck(forceFlush bool) error {
|
||||
if err := f.fr.WriteSettingsAck(); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error {
|
||||
if err := f.fr.WriteWindowUpdate(streamID, incr); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) flushWrite() error {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
|
||||
func (f *framer) readFrame() (http2.Frame, error) {
|
||||
return f.fr.ReadFrame()
|
||||
}
|
||||
|
||||
func (f *framer) errorDetail() error {
|
||||
return f.fr.ErrorDetail()
|
||||
}
|
|
@ -17,17 +17,19 @@
|
|||
*/
|
||||
|
||||
// Package transport defines and implements message oriented communication
|
||||
// channel to complete various transactions (e.g., an RPC).
|
||||
package transport // import "google.golang.org/grpc/transport"
|
||||
// channel to complete various transactions (e.g., an RPC). It is meant for
|
||||
// grpc-internal usage and is not intended to be imported directly by users.
|
||||
package transport
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/http2"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
|
@ -56,6 +58,7 @@ type recvBuffer struct {
|
|||
c chan recvMsg
|
||||
mu sync.Mutex
|
||||
backlog []recvMsg
|
||||
err error
|
||||
}
|
||||
|
||||
func newRecvBuffer() *recvBuffer {
|
||||
|
@ -67,20 +70,27 @@ func newRecvBuffer() *recvBuffer {
|
|||
|
||||
func (b *recvBuffer) put(r recvMsg) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.err != nil {
|
||||
b.mu.Unlock()
|
||||
// An error had occurred earlier, don't accept more
|
||||
// data or errors.
|
||||
return
|
||||
}
|
||||
b.err = r.err
|
||||
if len(b.backlog) == 0 {
|
||||
select {
|
||||
case b.c <- r:
|
||||
b.mu.Unlock()
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.backlog = append(b.backlog, r)
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
func (b *recvBuffer) load() {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if len(b.backlog) > 0 {
|
||||
select {
|
||||
case b.c <- b.backlog[0]:
|
||||
|
@ -89,6 +99,7 @@ func (b *recvBuffer) load() {
|
|||
default:
|
||||
}
|
||||
}
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
// get returns the channel that receives a recvMsg in the buffer.
|
||||
|
@ -99,11 +110,12 @@ func (b *recvBuffer) get() <-chan recvMsg {
|
|||
return b.c
|
||||
}
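For illustration (not part of the vendored code): recvBuffer (and the controlBuffer it resembles) builds an unbounded, non-blocking queue out of a 1-slot channel plus a backlog slice; put never blocks, and the consumer calls load after each receive. A minimal standalone sketch:

package main

import (
    "fmt"
    "sync"
)

// unbounded sketches the recvBuffer pattern above.
type unbounded struct {
    c       chan string
    mu      sync.Mutex
    backlog []string
}

func newUnbounded() *unbounded { return &unbounded{c: make(chan string, 1)} }

func (b *unbounded) put(s string) {
    b.mu.Lock()
    defer b.mu.Unlock()
    if len(b.backlog) == 0 {
        select {
        case b.c <- s: // fast path: the channel slot is free
            return
        default:
        }
    }
    b.backlog = append(b.backlog, s)
}

// load moves the next backlog item onto the channel; the consumer calls it
// after every receive, mirroring recvBuffer.load above.
func (b *unbounded) load() {
    b.mu.Lock()
    defer b.mu.Unlock()
    if len(b.backlog) > 0 {
        select {
        case b.c <- b.backlog[0]:
            b.backlog = b.backlog[1:]
        default:
        }
    }
}

func main() {
    b := newUnbounded()
    for i := 0; i < 3; i++ {
        b.put(fmt.Sprintf("msg-%d", i)) // never blocks
    }
    for i := 0; i < 3; i++ {
        fmt.Println(<-b.c)
        b.load()
    }
}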
|
||||
|
||||
//
|
||||
// recvBufferReader implements io.Reader interface to read the data from
|
||||
// recvBuffer.
|
||||
type recvBufferReader struct {
|
||||
ctx context.Context
|
||||
goAway chan struct{}
|
||||
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
|
||||
recv *recvBuffer
|
||||
last []byte // Stores the remaining data in the previous calls.
|
||||
err error
|
||||
|
@ -128,10 +140,8 @@ func (r *recvBufferReader) read(p []byte) (n int, err error) {
|
|||
return copied, nil
|
||||
}
|
||||
select {
|
||||
case <-r.ctx.Done():
|
||||
case <-r.ctxDone:
|
||||
return 0, ContextErr(r.ctx.Err())
|
||||
case <-r.goAway:
|
||||
return 0, ErrStreamDrain
|
||||
case m := <-r.recv.get():
|
||||
r.recv.load()
|
||||
if m.err != nil {
|
||||
|
@ -143,60 +153,7 @@ func (r *recvBufferReader) read(p []byte) (n int, err error) {
|
|||
}
|
||||
}
|
||||
|
||||
// All items in an out of a controlBuffer should be the same type.
|
||||
type item interface {
|
||||
item()
|
||||
}
|
||||
|
||||
// controlBuffer is an unbounded channel of item.
|
||||
type controlBuffer struct {
|
||||
c chan item
|
||||
mu sync.Mutex
|
||||
backlog []item
|
||||
}
|
||||
|
||||
func newControlBuffer() *controlBuffer {
|
||||
b := &controlBuffer{
|
||||
c: make(chan item, 1),
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *controlBuffer) put(r item) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if len(b.backlog) == 0 {
|
||||
select {
|
||||
case b.c <- r:
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.backlog = append(b.backlog, r)
|
||||
}
|
||||
|
||||
func (b *controlBuffer) load() {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if len(b.backlog) > 0 {
|
||||
select {
|
||||
case b.c <- b.backlog[0]:
|
||||
b.backlog[0] = nil
|
||||
b.backlog = b.backlog[1:]
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// get returns the channel that receives an item in the buffer.
|
||||
//
|
||||
// Upon receipt of an item, the caller should call load to send another
|
||||
// item onto the channel if there is any.
|
||||
func (b *controlBuffer) get() <-chan item {
|
||||
return b.c
|
||||
}
|
||||
|
||||
type streamState uint8
|
||||
type streamState uint32
|
||||
|
||||
const (
|
||||
streamActive streamState = iota
|
||||
|
@ -208,64 +165,94 @@ const (
|
|||
// Stream represents an RPC in the transport layer.
|
||||
type Stream struct {
|
||||
id uint32
|
||||
// nil for client side Stream.
|
||||
st ServerTransport
|
||||
// ctx is the associated context of the stream.
|
||||
ctx context.Context
|
||||
// cancel is always nil for client side Stream.
|
||||
cancel context.CancelFunc
|
||||
// done is closed when the final status arrives.
|
||||
done chan struct{}
|
||||
// goAway is closed when the server sent GoAways signal before this stream was initiated.
|
||||
goAway chan struct{}
|
||||
// method records the associated RPC method of the stream.
|
||||
method string
|
||||
st ServerTransport // nil for client side Stream
|
||||
ctx context.Context // the associated context of the stream
|
||||
cancel context.CancelFunc // always nil for client side Stream
|
||||
done chan struct{} // closed at the end of stream to unblock writers. On the client side.
|
||||
ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance)
|
||||
method string // the associated RPC method of the stream
|
||||
recvCompress string
|
||||
sendCompress string
|
||||
buf *recvBuffer
|
||||
trReader io.Reader
|
||||
fc *inFlow
|
||||
recvQuota uint32
|
||||
|
||||
// TODO: Remote this unused variable.
|
||||
// The accumulated inbound quota pending for window update.
|
||||
updateQuota uint32
|
||||
wq *writeQuota
|
||||
|
||||
// Callback to state application's intentions to read data. This
|
||||
// is used to adjust flow control, if need be.
|
||||
// is used to adjust flow control, if needed.
|
||||
requestRead func(int)
|
||||
|
||||
sendQuotaPool *quotaPool
|
||||
// Close headerChan to indicate the end of reception of header metadata.
|
||||
headerChan chan struct{}
|
||||
// header caches the received header metadata.
|
||||
header metadata.MD
|
||||
// The key-value map of trailer metadata.
|
||||
trailer metadata.MD
|
||||
headerChan chan struct{} // closed to indicate the end of header metadata.
|
||||
headerDone uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
|
||||
|
||||
// hdrMu protects header and trailer metadata on the server-side.
|
||||
hdrMu sync.Mutex
|
||||
header metadata.MD // the received header metadata.
|
||||
trailer metadata.MD // the key-value map of trailer metadata.
|
||||
|
||||
noHeaders bool // set if the client never received headers (set only after the stream is done).
|
||||
|
||||
// On the server-side, headerSent is atomically set to 1 when the headers are sent out.
|
||||
headerSent uint32
|
||||
|
||||
mu sync.RWMutex // guard the following
|
||||
// headerOK becomes true from the first header is about to send.
|
||||
headerOk bool
|
||||
state streamState
|
||||
// true iff headerChan is closed. Used to avoid closing headerChan
|
||||
// multiple times.
|
||||
headerDone bool
|
||||
// the status error received from the server.
|
||||
|
||||
// On client-side it is the status error received from the server.
|
||||
// On server-side it is unused.
|
||||
status *status.Status
|
||||
// rstStream indicates whether a RST_STREAM frame needs to be sent
|
||||
// to the server to signify that this stream is closing.
|
||||
rstStream bool
|
||||
// rstError is the error that needs to be sent along with the RST_STREAM frame.
|
||||
rstError http2.ErrCode
|
||||
// bytesSent and bytesReceived indicates whether any bytes have been sent or
|
||||
// received on this stream.
|
||||
bytesSent bool
|
||||
bytesReceived bool
|
||||
|
||||
bytesReceived uint32 // indicates whether any bytes have been received on this stream
|
||||
unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream
|
||||
|
||||
// contentSubtype is the content-subtype for requests.
|
||||
// this must be lowercase or the behavior is undefined.
|
||||
contentSubtype string
|
||||
}
|
||||
|
||||
// isHeaderSent is only valid on the server-side.
|
||||
func (s *Stream) isHeaderSent() bool {
|
||||
return atomic.LoadUint32(&s.headerSent) == 1
|
||||
}
|
||||
|
||||
// updateHeaderSent updates headerSent and returns true
|
||||
// if it was already set. It is valid only on server-side.
|
||||
func (s *Stream) updateHeaderSent() bool {
|
||||
return atomic.SwapUint32(&s.headerSent, 1) == 1
|
||||
}
|
||||
|
||||
func (s *Stream) swapState(st streamState) streamState {
|
||||
return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
|
||||
}
|
||||
|
||||
func (s *Stream) compareAndSwapState(oldState, newState streamState) bool {
|
||||
return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState))
|
||||
}
|
||||
|
||||
func (s *Stream) getState() streamState {
|
||||
return streamState(atomic.LoadUint32((*uint32)(&s.state)))
|
||||
}
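For illustration (not part of the vendored code): the stream state is now a uint32 so transitions use atomics rather than the stream mutex. A minimal sketch of that pattern, with a hypothetical stream type standing in for Stream:

package main

import (
    "fmt"
    "sync/atomic"
)

type streamState uint32

const (
    streamActive streamState = iota
    streamDone
)

// stream stores its state as a uint32 so compare-and-swap transitions need no lock.
type stream struct{ state streamState }

func (s *stream) compareAndSwapState(oldSt, newSt streamState) bool {
    return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldSt), uint32(newSt))
}

func (s *stream) getState() streamState {
    return streamState(atomic.LoadUint32((*uint32)(&s.state)))
}

func main() {
    s := &stream{}
    fmt.Println(s.compareAndSwapState(streamActive, streamDone)) // true
    fmt.Println(s.compareAndSwapState(streamActive, streamDone)) // false: already done
    fmt.Println(s.getState() == streamDone)                      // true
}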
|
||||
|
||||
func (s *Stream) waitOnHeader() error {
|
||||
if s.headerChan == nil {
|
||||
// On the server headerChan is always nil since a stream originates
|
||||
// only after having received headers.
|
||||
return nil
|
||||
}
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return ContextErr(s.ctx.Err())
|
||||
case <-s.headerChan:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// RecvCompress returns the compression algorithm applied to the inbound
|
||||
// message. It is empty string if there is no compression applied.
|
||||
func (s *Stream) RecvCompress() string {
|
||||
if err := s.waitOnHeader(); err != nil {
|
||||
return ""
|
||||
}
|
||||
return s.recvCompress
|
||||
}
|
||||
|
||||
|
@ -274,53 +261,59 @@ func (s *Stream) SetSendCompress(str string) {
|
|||
s.sendCompress = str
|
||||
}
|
||||
|
||||
// Done returns a chanel which is closed when it receives the final status
|
||||
// Done returns a channel which is closed when it receives the final status
|
||||
// from the server.
|
||||
func (s *Stream) Done() <-chan struct{} {
|
||||
return s.done
|
||||
}
|
||||
|
||||
// GoAway returns a channel which is closed when the server sent GoAways signal
|
||||
// before this stream was initiated.
|
||||
func (s *Stream) GoAway() <-chan struct{} {
|
||||
return s.goAway
|
||||
}
|
||||
|
||||
// Header acquires the key-value pairs of header metadata once it
|
||||
// is available. It blocks until i) the metadata is ready or ii) there is no
|
||||
// header metadata or iii) the stream is canceled/expired.
|
||||
func (s *Stream) Header() (metadata.MD, error) {
|
||||
var err error
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
err = ContextErr(s.ctx.Err())
|
||||
case <-s.goAway:
|
||||
err = ErrStreamDrain
|
||||
case <-s.headerChan:
|
||||
return s.header.Copy(), nil
|
||||
}
|
||||
err := s.waitOnHeader()
|
||||
// Even if the stream is closed, header is returned if available.
|
||||
select {
|
||||
case <-s.headerChan:
|
||||
if s.header == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return s.header.Copy(), nil
|
||||
default:
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TrailersOnly blocks until a header or trailers-only frame is received and
|
||||
// then returns true if the stream was trailers-only. If the stream ends
|
||||
// before headers are received, returns true, nil. If a context error happens
|
||||
// first, returns it as a status error. Client-side only.
|
||||
func (s *Stream) TrailersOnly() (bool, error) {
|
||||
err := s.waitOnHeader()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
// if !headerDone, some other connection error occurred.
|
||||
return s.noHeaders && atomic.LoadUint32(&s.headerDone) == 1, nil
|
||||
}
|
||||
|
||||
// Trailer returns the cached trailer metadata. Note that if it is not called
|
||||
// after the entire stream is done, it could return an empty MD. Client
|
||||
// side only.
|
||||
// It can be safely read only after stream has ended that is either read
|
||||
// or write have returned io.EOF.
|
||||
func (s *Stream) Trailer() metadata.MD {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.trailer.Copy()
|
||||
c := s.trailer.Copy()
|
||||
return c
|
||||
}
|
||||
|
||||
// ServerTransport returns the underlying ServerTransport for the stream.
|
||||
// The client side stream always returns nil.
|
||||
func (s *Stream) ServerTransport() ServerTransport {
|
||||
return s.st
|
||||
// ContentSubtype returns the content-subtype for a request. For example, a
|
||||
// content-subtype of "proto" will result in a content-type of
|
||||
// "application/grpc+proto". This will always be lowercase. See
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
|
||||
// more details.
|
||||
func (s *Stream) ContentSubtype() string {
|
||||
return s.contentSubtype
|
||||
}
|
||||
|
||||
// Context returns the context of the stream.
|
||||
|
@ -334,34 +327,48 @@ func (s *Stream) Method() string {
|
|||
}
|
||||
|
||||
// Status returns the status received from the server.
|
||||
// Status can be read safely only after the stream has ended,
|
||||
// that is, after Done() is closed.
|
||||
func (s *Stream) Status() *status.Status {
|
||||
return s.status
|
||||
}
|
||||
|
||||
// SetHeader sets the header metadata. This can be called multiple times.
|
||||
// Server side only.
|
||||
// This should not be called in parallel to other data writes.
|
||||
func (s *Stream) SetHeader(md metadata.MD) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.headerOk || s.state == streamDone {
|
||||
return ErrIllegalHeaderWrite
|
||||
}
|
||||
if md.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
if s.isHeaderSent() || s.getState() == streamDone {
|
||||
return ErrIllegalHeaderWrite
|
||||
}
|
||||
s.hdrMu.Lock()
|
||||
s.header = metadata.Join(s.header, md)
|
||||
s.hdrMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// SendHeader sends the given header metadata. The given metadata is
|
||||
// combined with any metadata set by previous calls to SetHeader and
|
||||
// then written to the transport stream.
|
||||
func (s *Stream) SendHeader(md metadata.MD) error {
|
||||
return s.st.WriteHeader(s, md)
|
||||
}
|
||||
|
||||
// SetTrailer sets the trailer metadata which will be sent with the RPC status
|
||||
// by the server. This can be called multiple times. Server side only.
|
||||
// This should not be called in parallel to other data writes.
|
||||
func (s *Stream) SetTrailer(md metadata.MD) error {
|
||||
if md.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.getState() == streamDone {
|
||||
return ErrIllegalHeaderWrite
|
||||
}
|
||||
s.hdrMu.Lock()
|
||||
s.trailer = metadata.Join(s.trailer, md)
|
||||
s.hdrMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -401,26 +408,15 @@ func (t *transportReader) Read(p []byte) (n int, err error) {
|
|||
return
|
||||
}
|
||||
|
||||
// finish sets the stream's state and status, and closes the done channel.
|
||||
// s.mu must be held by the caller. st must always be non-nil.
|
||||
func (s *Stream) finish(st *status.Status) {
|
||||
s.status = st
|
||||
s.state = streamDone
|
||||
close(s.done)
|
||||
}
|
||||
|
||||
// BytesSent indicates whether any bytes have been sent on this stream.
|
||||
func (s *Stream) BytesSent() bool {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.bytesSent
|
||||
}
|
||||
|
||||
// BytesReceived indicates whether any bytes have been received on this stream.
|
||||
func (s *Stream) BytesReceived() bool {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.bytesReceived
|
||||
return atomic.LoadUint32(&s.bytesReceived) == 1
|
||||
}
|
||||
|
||||
// Unprocessed indicates whether the server did not process this stream --
|
||||
// i.e. it sent a refused stream or GOAWAY including this stream ID.
|
||||
func (s *Stream) Unprocessed() bool {
|
||||
return atomic.LoadUint32(&s.unprocessed) == 1
|
||||
}
|
||||
|
||||
// GoString is implemented by Stream so context.String() won't
|
||||
|
@ -429,27 +425,11 @@ func (s *Stream) GoString() string {
|
|||
return fmt.Sprintf("<stream: %p, %v>", s, s.method)
|
||||
}
|
||||
|
||||
// The key to save transport.Stream in the context.
|
||||
type streamKey struct{}
|
||||
|
||||
// newContextWithStream creates a new context from ctx and attaches stream
|
||||
// to it.
|
||||
func newContextWithStream(ctx context.Context, stream *Stream) context.Context {
|
||||
return context.WithValue(ctx, streamKey{}, stream)
|
||||
}
|
||||
|
||||
// StreamFromContext returns the stream saved in ctx.
|
||||
func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
|
||||
s, ok = ctx.Value(streamKey{}).(*Stream)
|
||||
return
|
||||
}
|
||||
|
||||
// state of transport
|
||||
type transportState int
|
||||
|
||||
const (
|
||||
reachable transportState = iota
|
||||
unreachable
|
||||
closing
|
||||
draining
|
||||
)
|
||||
|
@ -464,6 +444,10 @@ type ServerConfig struct {
|
|||
KeepalivePolicy keepalive.EnforcementPolicy
|
||||
InitialWindowSize int32
|
||||
InitialConnWindowSize int32
|
||||
WriteBufferSize int
|
||||
ReadBufferSize int
|
||||
ChannelzParentID int64
|
||||
MaxHeaderListSize *uint32
|
||||
}
|
||||
|
||||
// NewServerTransport creates a ServerTransport with conn or non-nil error
|
||||
|
@ -476,9 +460,6 @@ func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (S
|
|||
type ConnectOptions struct {
|
||||
// UserAgent is the application user agent.
|
||||
UserAgent string
|
||||
// Authority is the :authority pseudo-header to use. This field has no effect if
|
||||
// TransportCredentials is set.
|
||||
Authority string
|
||||
// Dialer specifies how to dial a network address.
|
||||
Dialer func(context.Context, string) (net.Conn, error)
|
||||
// FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
|
||||
|
@ -491,22 +472,31 @@ type ConnectOptions struct {
|
|||
KeepaliveParams keepalive.ClientParameters
|
||||
// StatsHandler stores the handler for stats.
|
||||
StatsHandler stats.Handler
|
||||
// InitialWindowSize sets the intial window size for a stream.
|
||||
// InitialWindowSize sets the initial window size for a stream.
|
||||
InitialWindowSize int32
|
||||
// InitialConnWindowSize sets the intial window size for a connection.
|
||||
// InitialConnWindowSize sets the initial window size for a connection.
|
||||
InitialConnWindowSize int32
|
||||
// WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire.
|
||||
WriteBufferSize int
|
||||
// ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
|
||||
ReadBufferSize int
|
||||
// ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
|
||||
ChannelzParentID int64
|
||||
// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
|
||||
MaxHeaderListSize *uint32
|
||||
}
|
||||
|
||||
// TargetInfo contains the information of the target such as network address and metadata.
|
||||
type TargetInfo struct {
|
||||
Addr string
|
||||
Metadata interface{}
|
||||
Authority string
|
||||
}
|
||||
|
||||
// NewClientTransport establishes the transport with the required ConnectOptions
|
||||
// and returns it to the caller.
|
||||
func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions) (ClientTransport, error) {
|
||||
return newHTTP2Client(ctx, target, opts)
|
||||
func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error) {
|
||||
return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess)
|
||||
}
|
||||
|
||||
// Options provides additional hints and information for message
|
||||
|
@ -515,11 +505,6 @@ type Options struct {
|
|||
// Last indicates whether this write is the last piece for
|
||||
// this stream.
|
||||
Last bool
|
||||
|
||||
// Delay is a hint to the transport implementation for whether
|
||||
// the data could be buffered for a batching write. The
|
||||
// Transport implementation may ignore the hint.
|
||||
Delay bool
|
||||
}
|
||||
|
||||
// CallHdr carries the information of a particular RPC.
|
||||
|
@ -530,10 +515,6 @@ type CallHdr struct {
|
|||
// Method specifies the operation to perform.
|
||||
Method string
|
||||
|
||||
// RecvCompress specifies the compression algorithm applied on
|
||||
// inbound messages.
|
||||
RecvCompress string
|
||||
|
||||
// SendCompress specifies the compression algorithm applied on
|
||||
// outbound message.
|
||||
SendCompress string
|
||||
|
@ -541,13 +522,15 @@ type CallHdr struct {
|
|||
// Creds specifies credentials.PerRPCCredentials for a call.
|
||||
Creds credentials.PerRPCCredentials
|
||||
|
||||
// Flush indicates whether a new stream command should be sent
|
||||
// to the peer without waiting for the first data. This is
|
||||
// only a hint.
|
||||
// If it's true, the transport may modify the flush decision
|
||||
// for performance purposes.
|
||||
// If it's false, new stream will never be flushed.
|
||||
Flush bool
|
||||
// ContentSubtype specifies the content-subtype for a request. For example, a
|
||||
// content-subtype of "proto" will result in a content-type of
|
||||
// "application/grpc+proto". The value of ContentSubtype must be all
|
||||
// lowercase, otherwise the behavior is undefined. See
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
|
||||
// for more details.
|
||||
ContentSubtype string
|
||||
|
||||
PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
|
||||
}
|
||||
|
||||
// ClientTransport is the common interface for all gRPC client-side transport
|
||||
|
@ -564,7 +547,7 @@ type ClientTransport interface {
|
|||
|
||||
// Write sends the data for the given stream. A nil stream indicates
|
||||
// the write is to be performed on the transport as a whole.
|
||||
Write(s *Stream, data []byte, opts *Options) error
|
||||
Write(s *Stream, hdr []byte, data []byte, opts *Options) error
|
||||
|
||||
// NewStream creates a Stream for an RPC.
|
||||
NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
|
||||
|
@ -589,6 +572,12 @@ type ClientTransport interface {
|
|||
|
||||
// GetGoAwayReason returns the reason why GoAway frame was received.
|
||||
GetGoAwayReason() GoAwayReason
|
||||
|
||||
// IncrMsgSent increments the number of message sent through this transport.
|
||||
IncrMsgSent()
|
||||
|
||||
// IncrMsgRecv increments the number of message received through this transport.
|
||||
IncrMsgRecv()
|
||||
}
|
||||
|
||||
// ServerTransport is the common interface for all gRPC server-side transport
|
||||
|
@ -606,7 +595,7 @@ type ServerTransport interface {
|
|||
|
||||
// Write sends the data for the given stream.
|
||||
// Write may not be called on all streams.
|
||||
Write(s *Stream, data []byte, opts *Options) error
|
||||
Write(s *Stream, hdr []byte, data []byte, opts *Options) error
|
||||
|
||||
// WriteStatus sends the status of a stream to the client. WriteStatus is
|
||||
// the final call made on a stream and always occurs.
|
||||
|
@ -622,14 +611,12 @@ type ServerTransport interface {
|
|||
|
||||
// Drain notifies the client this ServerTransport stops accepting new RPCs.
|
||||
Drain()
|
||||
}
|
||||
|
||||
// streamErrorf creates an StreamError with the specified error code and description.
|
||||
func streamErrorf(c codes.Code, format string, a ...interface{}) StreamError {
|
||||
return StreamError{
|
||||
Code: c,
|
||||
Desc: fmt.Sprintf(format, a...),
|
||||
}
|
||||
// IncrMsgSent increments the number of message sent through this transport.
|
||||
IncrMsgSent()
|
||||
|
||||
// IncrMsgRecv increments the number of message received through this transport.
|
||||
IncrMsgRecv()
|
||||
}
|
||||
|
||||
// connectionErrorf creates an ConnectionError with the specified error description.
|
||||
|
@ -671,60 +658,28 @@ func (e ConnectionError) Origin() error {
|
|||
var (
|
||||
// ErrConnClosing indicates that the transport is closing.
|
||||
ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
|
||||
// ErrStreamDrain indicates that the stream is rejected by the server because
|
||||
// the server stops accepting new RPCs.
|
||||
ErrStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs")
|
||||
// errStreamDrain indicates that the stream is rejected because the
|
||||
// connection is draining. This could be caused by goaway or balancer
|
||||
// removing the address.
|
||||
errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
|
||||
// errStreamDone is returned from write at the client side to indicate application
|
||||
// layer of an error.
|
||||
errStreamDone = errors.New("the stream is done")
|
||||
// StatusGoAway indicates that the server sent a GOAWAY that included this
|
||||
// stream's ID in unprocessed RPCs.
|
||||
statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
|
||||
)
|
||||
|
||||
// TODO: See if we can replace StreamError with status package errors.
|
||||
|
||||
// StreamError is an error that only affects one stream within a connection.
|
||||
type StreamError struct {
|
||||
Code codes.Code
|
||||
Desc string
|
||||
}
|
||||
|
||||
func (e StreamError) Error() string {
|
||||
return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc)
|
||||
}
|
||||
|
||||
// wait blocks until it can receive from ctx.Done, closing, or proceed.
|
||||
// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.
|
||||
// If it receives from done, it returns 0, io.EOF if ctx is not done; otherwise
|
||||
// it return the StreamError for ctx.Err.
|
||||
// If it receives from goAway, it returns 0, ErrStreamDrain.
|
||||
// If it receives from closing, it returns 0, ErrConnClosing.
|
||||
// If it receives from proceed, it returns the received integer, nil.
|
||||
func wait(ctx context.Context, done, goAway, closing <-chan struct{}, proceed <-chan int) (int, error) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return 0, ContextErr(ctx.Err())
|
||||
case <-done:
|
||||
// User cancellation has precedence.
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return 0, ContextErr(ctx.Err())
|
||||
default:
|
||||
}
|
||||
return 0, io.EOF
|
||||
case <-goAway:
|
||||
return 0, ErrStreamDrain
|
||||
case <-closing:
|
||||
return 0, ErrConnClosing
|
||||
case i := <-proceed:
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
// GoAwayReason contains the reason for the GoAway frame received.
|
||||
type GoAwayReason uint8
|
||||
|
||||
const (
|
||||
// Invalid indicates that no GoAway frame is received.
|
||||
Invalid GoAwayReason = 0
|
||||
// NoReason is the default value when GoAway frame is received.
|
||||
NoReason GoAwayReason = 1
|
||||
// TooManyPings indicates that a GoAway frame with ErrCodeEnhanceYourCalm
|
||||
// was recieved and that the debug data said "too_many_pings".
|
||||
TooManyPings GoAwayReason = 2
|
||||
// GoAwayInvalid indicates that no GoAway frame is received.
|
||||
GoAwayInvalid GoAwayReason = 0
|
||||
// GoAwayNoReason is the default value when GoAway frame is received.
|
||||
GoAwayNoReason GoAwayReason = 1
|
||||
// GoAwayTooManyPings indicates that a GoAway frame with
|
||||
// ErrCodeEnhanceYourCalm was received and that the debug data said
|
||||
// "too_many_pings".
|
||||
GoAwayTooManyPings GoAwayReason = 2
|
||||
)
|
|
@ -17,7 +17,8 @@
|
|||
*/
|
||||
|
||||
// Package metadata define the structure of the metadata supported by gRPC library.
|
||||
// Please refer to https://grpc.io/docs/guides/wire.html for more information about custom-metadata.
|
||||
// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
|
||||
// for more information about custom-metadata.
|
||||
package metadata // import "google.golang.org/grpc/metadata"
|
||||
|
||||
import (
|
||||
|
@ -27,7 +28,9 @@ import (
|
|||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// DecodeKeyValue returns k, v, nil. It is deprecated and should not be used.
|
||||
// DecodeKeyValue returns k, v, nil.
|
||||
//
|
||||
// Deprecated: use k and v directly instead.
|
||||
func DecodeKeyValue(k, v string) (string, string, error) {
|
||||
return k, v, nil
|
||||
}
|
||||
|
@ -44,6 +47,9 @@ type MD map[string][]string
|
|||
// - lowercase letters: a-z
|
||||
// - special characters: -_.
|
||||
// Uppercase letters are automatically converted to lowercase.
|
||||
//
|
||||
// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
|
||||
// result in errors if set in metadata.
|
||||
func New(m map[string]string) MD {
|
||||
md := MD{}
|
||||
for k, val := range m {
|
||||
|
@ -62,6 +68,9 @@ func New(m map[string]string) MD {
|
|||
// - lowercase letters: a-z
|
||||
// - special characters: -_.
|
||||
// Uppercase letters are automatically converted to lowercase.
|
||||
//
|
||||
// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
|
||||
// result in errors if set in metadata.
|
||||
func Pairs(kv ...string) MD {
|
||||
if len(kv)%2 == 1 {
|
||||
panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
|
||||
|
@ -88,6 +97,30 @@ func (md MD) Copy() MD {
|
|||
return Join(md)
|
||||
}
|
||||
|
||||
// Get obtains the values for a given key.
|
||||
func (md MD) Get(k string) []string {
|
||||
k = strings.ToLower(k)
|
||||
return md[k]
|
||||
}
|
||||
|
||||
// Set sets the value of a given key with a slice of values.
|
||||
func (md MD) Set(k string, vals ...string) {
|
||||
if len(vals) == 0 {
|
||||
return
|
||||
}
|
||||
k = strings.ToLower(k)
|
||||
md[k] = vals
|
||||
}
|
||||
|
||||
// Append adds the values to key k, not overwriting what was already stored at that key.
|
||||
func (md MD) Append(k string, vals ...string) {
|
||||
if len(vals) == 0 {
|
||||
return
|
||||
}
|
||||
k = strings.ToLower(k)
|
||||
md[k] = append(md[k], vals...)
|
||||
}
|
||||
|
||||
// Join joins any number of mds into a single MD.
|
||||
// The order of values for each key is determined by the order in which
|
||||
// the mds containing those values are presented to Join.
|
||||
|
@ -104,24 +137,31 @@ func Join(mds ...MD) MD {
|
|||
type mdIncomingKey struct{}
|
||||
type mdOutgoingKey struct{}
|
||||
|
||||
// NewContext is a wrapper for NewOutgoingContext(ctx, md). Deprecated.
|
||||
func NewContext(ctx context.Context, md MD) context.Context {
|
||||
return NewOutgoingContext(ctx, md)
|
||||
}
|
||||
|
||||
// NewIncomingContext creates a new context with incoming md attached.
|
||||
func NewIncomingContext(ctx context.Context, md MD) context.Context {
|
||||
return context.WithValue(ctx, mdIncomingKey{}, md)
|
||||
}
|
||||
|
||||
// NewOutgoingContext creates a new context with outgoing md attached.
|
||||
// NewOutgoingContext creates a new context with outgoing md attached. If used
|
||||
// in conjunction with AppendToOutgoingContext, NewOutgoingContext will
|
||||
// overwrite any previously-appended metadata.
|
||||
func NewOutgoingContext(ctx context.Context, md MD) context.Context {
|
||||
return context.WithValue(ctx, mdOutgoingKey{}, md)
|
||||
return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md})
|
||||
}
|
||||
|
||||
// FromContext is a wrapper for FromIncomingContext(ctx). Deprecated.
|
||||
func FromContext(ctx context.Context) (md MD, ok bool) {
|
||||
return FromIncomingContext(ctx)
|
||||
// AppendToOutgoingContext returns a new context with the provided kv merged
|
||||
// with any existing metadata in the context. Please refer to the
|
||||
// documentation of Pairs for a description of kv.
|
||||
func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context {
|
||||
if len(kv)%2 == 1 {
|
||||
panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
|
||||
}
|
||||
md, _ := ctx.Value(mdOutgoingKey{}).(rawMD)
|
||||
added := make([][]string, len(md.added)+1)
|
||||
copy(added, md.added)
|
||||
added[len(added)-1] = make([]string, len(kv))
|
||||
copy(added[len(added)-1], kv)
|
||||
return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
|
||||
}
|
||||
|
||||
// FromIncomingContext returns the incoming metadata in ctx if it exists. The
|
||||
|
@ -132,10 +172,39 @@ func FromIncomingContext(ctx context.Context) (md MD, ok bool) {
|
|||
return
|
||||
}
|
||||
|
||||
// FromOutgoingContextRaw returns the un-merged, intermediary contents
|
||||
// of rawMD. Remember to perform strings.ToLower on the keys. The returned
|
||||
// MD should not be modified. Writing to it may cause races. Modification
|
||||
// should be made to copies of the returned MD.
|
||||
//
|
||||
// This is intended for gRPC-internal use ONLY.
|
||||
func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
|
||||
raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
|
||||
if !ok {
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
return raw.md, raw.added, true
|
||||
}
|
||||
|
||||
// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The
|
||||
// returned MD should not be modified. Writing to it may cause races.
|
||||
// Modification should be made to the copies of the returned MD.
|
||||
func FromOutgoingContext(ctx context.Context) (md MD, ok bool) {
|
||||
md, ok = ctx.Value(mdOutgoingKey{}).(MD)
|
||||
return
|
||||
// Modification should be made to copies of the returned MD.
|
||||
func FromOutgoingContext(ctx context.Context) (MD, bool) {
|
||||
raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
mds := make([]MD, 0, len(raw.added)+1)
|
||||
mds = append(mds, raw.md)
|
||||
for _, vv := range raw.added {
|
||||
mds = append(mds, Pairs(vv...))
|
||||
}
|
||||
return Join(mds...), ok
|
||||
}
|
||||
|
||||
type rawMD struct {
|
||||
md MD
|
||||
added [][]string
|
||||
}
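// Illustrative sketch (not part of this change): how a client caller would
// typically combine these helpers before invoking an RPC. The header names and
// values below are placeholders.
func exampleOutgoingMetadata() context.Context {
	// Attach a base MD to the outgoing context.
	ctx := NewOutgoingContext(context.Background(), Pairs("authorization", "Bearer <token>"))
	// AppendToOutgoingContext merges with the previously attached MD instead of
	// replacing it, so "authorization" above is preserved.
	ctx = AppendToOutgoingContext(ctx, "request-id", "42")
	return ctx
}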
|
||||
|
|
|
@ -141,8 +141,8 @@ type dnsWatcher struct {
|
|||
r *dnsResolver
|
||||
host string
|
||||
port string
|
||||
// The latest resolved address list
|
||||
curAddrs []*Update
|
||||
// The latest resolved address set
|
||||
curAddrs map[string]*Update
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
t *time.Timer
|
||||
|
@ -153,10 +153,10 @@ type ipWatcher struct {
|
|||
updateChan chan *Update
|
||||
}
|
||||
|
||||
// Next returns the adrress resolution Update for the target. For IP address,
|
||||
// the resolution is itself, thus polling name server is unncessary. Therefore,
|
||||
// Next returns the address resolution Update for the target. For IP address,
|
||||
// the resolution is itself, thus polling name server is unnecessary. Therefore,
|
||||
// Next() will return an Update the first time it is called, and will be blocked
|
||||
// for all following calls as no Update exisits until watcher is closed.
|
||||
// for all following calls as no Update exists until watcher is closed.
|
||||
func (i *ipWatcher) Next() ([]*Update, error) {
|
||||
u, ok := <-i.updateChan
|
||||
if !ok {
|
||||
|
@ -192,28 +192,24 @@ type AddrMetadataGRPCLB struct {
|
|||
|
||||
// compileUpdate compares the old resolved addresses and newly resolved addresses,
|
||||
// and generates an update list
|
||||
func (w *dnsWatcher) compileUpdate(newAddrs []*Update) []*Update {
|
||||
update := make(map[Update]bool)
|
||||
for _, u := range newAddrs {
|
||||
update[*u] = true
|
||||
func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update {
|
||||
var res []*Update
|
||||
for a, u := range w.curAddrs {
|
||||
if _, ok := newAddrs[a]; !ok {
|
||||
u.Op = Delete
|
||||
res = append(res, u)
|
||||
}
|
||||
for _, u := range w.curAddrs {
|
||||
if _, ok := update[*u]; ok {
|
||||
delete(update, *u)
|
||||
continue
|
||||
}
|
||||
update[Update{Addr: u.Addr, Op: Delete, Metadata: u.Metadata}] = true
|
||||
for a, u := range newAddrs {
|
||||
if _, ok := w.curAddrs[a]; !ok {
|
||||
res = append(res, u)
|
||||
}
|
||||
res := make([]*Update, 0, len(update))
|
||||
for k := range update {
|
||||
tmp := k
|
||||
res = append(res, &tmp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (w *dnsWatcher) lookupSRV() []*Update {
|
||||
var newAddrs []*Update
|
||||
func (w *dnsWatcher) lookupSRV() map[string]*Update {
|
||||
newAddrs := make(map[string]*Update)
|
||||
_, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host)
|
||||
if err != nil {
|
||||
grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
|
||||
|
@ -231,15 +227,16 @@ func (w *dnsWatcher) lookupSRV() []*Update {
|
|||
grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
|
||||
continue
|
||||
}
|
||||
newAddrs = append(newAddrs, &Update{Addr: a + ":" + strconv.Itoa(int(s.Port)),
|
||||
Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}})
|
||||
addr := a + ":" + strconv.Itoa(int(s.Port))
|
||||
newAddrs[addr] = &Update{Addr: addr,
|
||||
Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}}
|
||||
}
|
||||
}
|
||||
return newAddrs
|
||||
}
|
||||
|
||||
func (w *dnsWatcher) lookupHost() []*Update {
|
||||
var newAddrs []*Update
|
||||
func (w *dnsWatcher) lookupHost() map[string]*Update {
|
||||
newAddrs := make(map[string]*Update)
|
||||
addrs, err := lookupHost(w.ctx, w.host)
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
|
||||
|
@ -251,7 +248,8 @@ func (w *dnsWatcher) lookupHost() []*Update {
|
|||
grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
|
||||
continue
|
||||
}
|
||||
newAddrs = append(newAddrs, &Update{Addr: a + ":" + w.port})
|
||||
addr := a + ":" + w.port
|
||||
newAddrs[addr] = &Update{Addr: addr}
|
||||
}
|
||||
return newAddrs
|
||||
}
|
||||
|
|
|
@ -18,20 +18,26 @@
|
|||
|
||||
// Package naming defines the naming API and related data structures for gRPC.
|
||||
// The interface is EXPERIMENTAL and may be subject to change.
|
||||
//
|
||||
// Deprecated: please use package resolver.
|
||||
package naming
|
||||
|
||||
// Operation defines the corresponding operations for a name resolution change.
|
||||
//
|
||||
// Deprecated: please use package resolver.
|
||||
type Operation uint8
|
||||
|
||||
const (
|
||||
// Add indicates a new address is added.
|
||||
Add Operation = iota
|
||||
// Delete indicates an exisiting address is deleted.
|
||||
// Delete indicates an existing address is deleted.
|
||||
Delete
|
||||
)
|
||||
|
||||
// Update defines a name resolution update. Notice that it is not valid to have
// both an empty string Addr and nil Metadata in an Update.
|
||||
//
|
||||
// Deprecated: please use package resolver.
|
||||
type Update struct {
|
||||
// Op indicates the operation of the update.
|
||||
Op Operation
|
||||
|
@ -43,12 +49,16 @@ type Update struct {
|
|||
}
|
||||
|
||||
// Resolver creates a Watcher for a target to track its resolution changes.
|
||||
//
|
||||
// Deprecated: please use package resolver.
|
||||
type Resolver interface {
|
||||
// Resolve creates a Watcher for target.
|
||||
Resolve(target string) (Watcher, error)
|
||||
}
|
||||
|
||||
// Watcher watches for the updates on the specified target.
|
||||
//
|
||||
// Deprecated: please use package resolver.
|
||||
type Watcher interface {
|
||||
// Next blocks until an update or error happens. It may return one or more
|
||||
// updates. The first call should get the full set of the results. It should
|
||||
|
|
|
@ -0,0 +1,180 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/transport"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
|
||||
// actions and unblocks when there's a picker update.
|
||||
type pickerWrapper struct {
|
||||
mu sync.Mutex
|
||||
done bool
|
||||
blockingCh chan struct{}
|
||||
picker balancer.Picker
|
||||
|
||||
// The latest connection happened.
|
||||
connErrMu sync.Mutex
|
||||
connErr error
|
||||
}
|
||||
|
||||
func newPickerWrapper() *pickerWrapper {
|
||||
bp := &pickerWrapper{blockingCh: make(chan struct{})}
|
||||
return bp
|
||||
}
|
||||
|
||||
func (bp *pickerWrapper) updateConnectionError(err error) {
|
||||
bp.connErrMu.Lock()
|
||||
bp.connErr = err
|
||||
bp.connErrMu.Unlock()
|
||||
}
|
||||
|
||||
func (bp *pickerWrapper) connectionError() error {
|
||||
bp.connErrMu.Lock()
|
||||
err := bp.connErr
|
||||
bp.connErrMu.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
// updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
|
||||
func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
|
||||
bp.mu.Lock()
|
||||
if bp.done {
|
||||
bp.mu.Unlock()
|
||||
return
|
||||
}
|
||||
bp.picker = p
|
||||
// bp.blockingCh should never be nil.
|
||||
close(bp.blockingCh)
|
||||
bp.blockingCh = make(chan struct{})
|
||||
bp.mu.Unlock()
|
||||
}
|
||||
|
||||
func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
|
||||
acw.mu.Lock()
|
||||
ac := acw.ac
|
||||
acw.mu.Unlock()
|
||||
ac.incrCallsStarted()
|
||||
return func(b balancer.DoneInfo) {
|
||||
if b.Err != nil && b.Err != io.EOF {
|
||||
ac.incrCallsFailed()
|
||||
} else {
|
||||
ac.incrCallsSucceeded()
|
||||
}
|
||||
if done != nil {
|
||||
done(b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// pick returns the transport that will be used for the RPC.
|
||||
// It may block in the following cases:
|
||||
// - there's no picker
|
||||
// - the current picker returns ErrNoSubConnAvailable
|
||||
// - the current picker returns other errors and failfast is false.
|
||||
// - the subConn returned by the current picker is not READY
|
||||
// When one of these situations happens, pick blocks until the picker gets updated.
|
||||
func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) {
|
||||
var (
|
||||
p balancer.Picker
|
||||
ch chan struct{}
|
||||
)
|
||||
|
||||
for {
|
||||
bp.mu.Lock()
|
||||
if bp.done {
|
||||
bp.mu.Unlock()
|
||||
return nil, nil, ErrClientConnClosing
|
||||
}
|
||||
|
||||
if bp.picker == nil {
|
||||
ch = bp.blockingCh
|
||||
}
|
||||
if ch == bp.blockingCh {
|
||||
// This could happen when either:
|
||||
// - bp.picker is nil (the previous if condition), or
|
||||
// - pick has already been called on the current picker.
|
||||
bp.mu.Unlock()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, nil, ctx.Err()
|
||||
case <-ch:
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
ch = bp.blockingCh
|
||||
p = bp.picker
|
||||
bp.mu.Unlock()
|
||||
|
||||
subConn, done, err := p.Pick(ctx, opts)
|
||||
|
||||
if err != nil {
|
||||
switch err {
|
||||
case balancer.ErrNoSubConnAvailable:
|
||||
continue
|
||||
case balancer.ErrTransientFailure:
|
||||
if !failfast {
|
||||
continue
|
||||
}
|
||||
return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError())
|
||||
default:
|
||||
// err is some other error.
|
||||
return nil, nil, toRPCErr(err)
|
||||
}
|
||||
}
|
||||
|
||||
acw, ok := subConn.(*acBalancerWrapper)
|
||||
if !ok {
|
||||
grpclog.Infof("subconn returned from pick is not *acBalancerWrapper")
|
||||
continue
|
||||
}
|
||||
if t, ok := acw.getAddrConn().getReadyTransport(); ok {
|
||||
if channelz.IsOn() {
|
||||
return t, doneChannelzWrapper(acw, done), nil
|
||||
}
|
||||
return t, done, nil
|
||||
}
|
||||
grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
|
||||
// If ok == false, ac.state is not READY.
|
||||
// A valid picker always returns a READY subConn. This means the state of ac
|
||||
// just changed, and picker will be updated shortly.
|
||||
// continue back to the beginning of the for loop to repick.
|
||||
}
|
||||
}
|
||||
|
||||
func (bp *pickerWrapper) close() {
|
||||
bp.mu.Lock()
|
||||
defer bp.mu.Unlock()
|
||||
if bp.done {
|
||||
return
|
||||
}
|
||||
bp.done = true
|
||||
close(bp.blockingCh)
|
||||
}
|
|
@ -0,0 +1,108 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
// PickFirstBalancerName is the name of the pick_first balancer.
|
||||
const PickFirstBalancerName = "pick_first"
|
||||
|
||||
func newPickfirstBuilder() balancer.Builder {
|
||||
return &pickfirstBuilder{}
|
||||
}
|
||||
|
||||
type pickfirstBuilder struct{}
|
||||
|
||||
func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
|
||||
return &pickfirstBalancer{cc: cc}
|
||||
}
|
||||
|
||||
func (*pickfirstBuilder) Name() string {
|
||||
return PickFirstBalancerName
|
||||
}
|
||||
|
||||
type pickfirstBalancer struct {
|
||||
cc balancer.ClientConn
|
||||
sc balancer.SubConn
|
||||
}
|
||||
|
||||
func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
|
||||
if err != nil {
|
||||
grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err)
|
||||
return
|
||||
}
|
||||
if b.sc == nil {
|
||||
b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
|
||||
if err != nil {
|
||||
grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
|
||||
return
|
||||
}
|
||||
b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc})
|
||||
b.sc.Connect()
|
||||
} else {
|
||||
b.sc.UpdateAddresses(addrs)
|
||||
b.sc.Connect()
|
||||
}
|
||||
}
|
||||
|
||||
func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
|
||||
grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
|
||||
if b.sc != sc {
|
||||
grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
|
||||
return
|
||||
}
|
||||
if s == connectivity.Shutdown {
|
||||
b.sc = nil
|
||||
return
|
||||
}
|
||||
|
||||
switch s {
|
||||
case connectivity.Ready, connectivity.Idle:
|
||||
b.cc.UpdateBalancerState(s, &picker{sc: sc})
|
||||
case connectivity.Connecting:
|
||||
b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable})
|
||||
case connectivity.TransientFailure:
|
||||
b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure})
|
||||
}
|
||||
}
|
||||
|
||||
func (b *pickfirstBalancer) Close() {
|
||||
}
|
||||
|
||||
type picker struct {
|
||||
err error
|
||||
sc balancer.SubConn
|
||||
}
|
||||
|
||||
func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
|
||||
if p.err != nil {
|
||||
return nil, nil, p.err
|
||||
}
|
||||
return p.sc, nil, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
balancer.Register(newPickfirstBuilder())
|
||||
}
|
|
@ -0,0 +1,397 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package dns implements a dns resolver to be installed as the default resolver
|
||||
// in grpc.
|
||||
package dns
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/backoff"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
func init() {
|
||||
resolver.Register(NewBuilder())
|
||||
}
|
||||
|
||||
const (
|
||||
defaultPort = "443"
|
||||
defaultFreq = time.Minute * 30
|
||||
golang = "GO"
|
||||
// In DNS, service config is encoded in a TXT record via the mechanism
|
||||
// described in RFC-1464 using the attribute name grpc_config.
|
||||
txtAttribute = "grpc_config="
|
||||
)
|
||||
|
||||
var (
|
||||
errMissingAddr = errors.New("dns resolver: missing address")
|
||||
|
||||
// An address ending with a colon that is supposed to be the separator
// between host and port is not allowed. E.g. "::" is a valid address as
// it is an IPv6 address (host only), while "[::]:" is invalid because it ends
// with a colon that would act as the host-port separator.
|
||||
errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
|
||||
)
|
||||
|
||||
// NewBuilder creates a dnsBuilder, which is used to construct DNS resolvers.
|
||||
func NewBuilder() resolver.Builder {
|
||||
return &dnsBuilder{minFreq: defaultFreq}
|
||||
}
|
||||
|
||||
type dnsBuilder struct {
|
||||
// minimum frequency of polling the DNS server.
|
||||
minFreq time.Duration
|
||||
}
|
||||
|
||||
// Build creates and starts a DNS resolver that watches the name resolution of the target.
|
||||
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
|
||||
if target.Authority != "" {
|
||||
return nil, fmt.Errorf("Default DNS resolver does not support custom DNS server")
|
||||
}
|
||||
host, port, err := parseTarget(target.Endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// IP address.
|
||||
if net.ParseIP(host) != nil {
|
||||
host, _ = formatIP(host)
|
||||
addr := []resolver.Address{{Addr: host + ":" + port}}
|
||||
i := &ipResolver{
|
||||
cc: cc,
|
||||
ip: addr,
|
||||
rn: make(chan struct{}, 1),
|
||||
q: make(chan struct{}),
|
||||
}
|
||||
cc.NewAddress(addr)
|
||||
go i.watcher()
|
||||
return i, nil
|
||||
}
|
||||
|
||||
// DNS address (non-IP).
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
d := &dnsResolver{
|
||||
freq: b.minFreq,
|
||||
backoff: backoff.Exponential{MaxDelay: b.minFreq},
|
||||
host: host,
|
||||
port: port,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
cc: cc,
|
||||
t: time.NewTimer(0),
|
||||
rn: make(chan struct{}, 1),
|
||||
disableServiceConfig: opts.DisableServiceConfig,
|
||||
}
|
||||
|
||||
d.wg.Add(1)
|
||||
go d.watcher()
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// Scheme returns the naming scheme of this resolver builder, which is "dns".
|
||||
func (b *dnsBuilder) Scheme() string {
|
||||
return "dns"
|
||||
}
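// Usage sketch (illustrative, not part of this change): with this builder
// registered in init(), a channel created against a "dns" target is resolved by
// this package. The host, port and dial options below are placeholders.
//
//   conn, err := grpc.Dial("dns:///greeter.example.com:50051", grpc.WithInsecure())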
|
||||
|
||||
// ipResolver watches for the name resolution update for an IP address.
|
||||
type ipResolver struct {
|
||||
cc resolver.ClientConn
|
||||
ip []resolver.Address
|
||||
// rn channel is used by ResolveNow() to force an immediate resolution of the target.
|
||||
rn chan struct{}
|
||||
q chan struct{}
|
||||
}
|
||||
|
||||
// ResolveNow resends the address it stores; no resolution is needed.
|
||||
func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) {
|
||||
select {
|
||||
case i.rn <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the ipResolver.
|
||||
func (i *ipResolver) Close() {
|
||||
close(i.q)
|
||||
}
|
||||
|
||||
func (i *ipResolver) watcher() {
|
||||
for {
|
||||
select {
|
||||
case <-i.rn:
|
||||
i.cc.NewAddress(i.ip)
|
||||
case <-i.q:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// dnsResolver watches for the name resolution update for a non-IP target.
|
||||
type dnsResolver struct {
|
||||
freq time.Duration
|
||||
backoff backoff.Exponential
|
||||
retryCount int
|
||||
host string
|
||||
port string
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
cc resolver.ClientConn
|
||||
// rn channel is used by ResolveNow() to force an immediate resolution of the target.
|
||||
rn chan struct{}
|
||||
t *time.Timer
|
||||
// wg is used to enforce Close() to return only after the watcher() goroutine has finished.
// Otherwise, a data race would be possible. [Race example] In dns_resolver_test we
// replace the real lookup functions with mocked ones to facilitate testing.
// If Close() didn't wait for the watcher() goroutine to finish, the race detector would
// sometimes report that the lookups inside watcher() (which READ the lookup function
// pointers) race with replaceNetFunc (which WRITEs the lookup function pointers).
|
||||
wg sync.WaitGroup
|
||||
disableServiceConfig bool
|
||||
}
|
||||
|
||||
// ResolveNow invokes an immediate resolution of the target that this dnsResolver watches.
|
||||
func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
|
||||
select {
|
||||
case d.rn <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the dnsResolver.
|
||||
func (d *dnsResolver) Close() {
|
||||
d.cancel()
|
||||
d.wg.Wait()
|
||||
d.t.Stop()
|
||||
}
|
||||
|
||||
func (d *dnsResolver) watcher() {
|
||||
defer d.wg.Done()
|
||||
for {
|
||||
select {
|
||||
case <-d.ctx.Done():
|
||||
return
|
||||
case <-d.t.C:
|
||||
case <-d.rn:
|
||||
}
|
||||
result, sc := d.lookup()
|
||||
// Next lookup should happen within an interval defined by d.freq. It may be
|
||||
// more often due to exponential retry on empty address list.
|
||||
if len(result) == 0 {
|
||||
d.retryCount++
|
||||
d.t.Reset(d.backoff.Backoff(d.retryCount))
|
||||
} else {
|
||||
d.retryCount = 0
|
||||
d.t.Reset(d.freq)
|
||||
}
|
||||
d.cc.NewServiceConfig(sc)
|
||||
d.cc.NewAddress(result)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupSRV() []resolver.Address {
|
||||
var newAddrs []resolver.Address
|
||||
_, srvs, err := lookupSRV(d.ctx, "grpclb", "tcp", d.host)
|
||||
if err != nil {
|
||||
grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
|
||||
return nil
|
||||
}
|
||||
for _, s := range srvs {
|
||||
lbAddrs, err := lookupHost(d.ctx, s.Target)
|
||||
if err != nil {
|
||||
grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err)
|
||||
continue
|
||||
}
|
||||
for _, a := range lbAddrs {
|
||||
a, ok := formatIP(a)
|
||||
if !ok {
|
||||
grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
|
||||
continue
|
||||
}
|
||||
addr := a + ":" + strconv.Itoa(int(s.Port))
|
||||
newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
|
||||
}
|
||||
}
|
||||
return newAddrs
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupTXT() string {
|
||||
ss, err := lookupTXT(d.ctx, d.host)
|
||||
if err != nil {
|
||||
grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
|
||||
return ""
|
||||
}
|
||||
var res string
|
||||
for _, s := range ss {
|
||||
res += s
|
||||
}
|
||||
|
||||
// TXT record must have "grpc_config=" attribute in order to be used as service config.
|
||||
if !strings.HasPrefix(res, txtAttribute) {
|
||||
grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute)
|
||||
return ""
|
||||
}
|
||||
return strings.TrimPrefix(res, txtAttribute)
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupHost() []resolver.Address {
|
||||
var newAddrs []resolver.Address
|
||||
addrs, err := lookupHost(d.ctx, d.host)
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
|
||||
return nil
|
||||
}
|
||||
for _, a := range addrs {
|
||||
a, ok := formatIP(a)
|
||||
if !ok {
|
||||
grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
|
||||
continue
|
||||
}
|
||||
addr := a + ":" + d.port
|
||||
newAddrs = append(newAddrs, resolver.Address{Addr: addr})
|
||||
}
|
||||
return newAddrs
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookup() ([]resolver.Address, string) {
|
||||
newAddrs := d.lookupSRV()
|
||||
// Support fallback to non-balancer address.
|
||||
newAddrs = append(newAddrs, d.lookupHost()...)
|
||||
if d.disableServiceConfig {
|
||||
return newAddrs, ""
|
||||
}
|
||||
sc := d.lookupTXT()
|
||||
return newAddrs, canaryingSC(sc)
|
||||
}
|
||||
|
||||
// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
|
||||
// If addr is an IPv4 address, return the addr and ok = true.
|
||||
// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
|
||||
func formatIP(addr string) (addrIP string, ok bool) {
|
||||
ip := net.ParseIP(addr)
|
||||
if ip == nil {
|
||||
return "", false
|
||||
}
|
||||
if ip.To4() != nil {
|
||||
return addr, true
|
||||
}
|
||||
return "[" + addr + "]", true
|
||||
}
|
||||
|
||||
// parseTarget takes the user input target string, returns formatted host and port info.
|
||||
// If target doesn't specify a port, set the port to be the defaultPort.
|
||||
// If target is in IPv6 format and the host-name is enclosed in square brackets, the brackets
// are stripped when setting the host.
|
||||
// examples:
|
||||
// target: "www.google.com" returns host: "www.google.com", port: "443"
|
||||
// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
|
||||
// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
|
||||
// target: ":80" returns host: "localhost", port: "80"
|
||||
func parseTarget(target string) (host, port string, err error) {
|
||||
if target == "" {
|
||||
return "", "", errMissingAddr
|
||||
}
|
||||
if ip := net.ParseIP(target); ip != nil {
|
||||
// target is an IPv4 or IPv6(without brackets) address
|
||||
return target, defaultPort, nil
|
||||
}
|
||||
if host, port, err = net.SplitHostPort(target); err == nil {
|
||||
if port == "" {
|
||||
// If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error.
|
||||
return "", "", errEndsWithColon
|
||||
}
|
||||
// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
|
||||
if host == "" {
|
||||
// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
|
||||
host = "localhost"
|
||||
}
|
||||
return host, port, nil
|
||||
}
|
||||
if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil {
|
||||
// target doesn't have port
|
||||
return host, port, nil
|
||||
}
|
||||
return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err)
|
||||
}
|
||||
|
||||
type rawChoice struct {
|
||||
ClientLanguage *[]string `json:"clientLanguage,omitempty"`
|
||||
Percentage *int `json:"percentage,omitempty"`
|
||||
ClientHostName *[]string `json:"clientHostName,omitempty"`
|
||||
ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"`
|
||||
}
|
||||
|
||||
func containsString(a *[]string, b string) bool {
|
||||
if a == nil {
|
||||
return true
|
||||
}
|
||||
for _, c := range *a {
|
||||
if c == b {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func chosenByPercentage(a *int) bool {
|
||||
if a == nil {
|
||||
return true
|
||||
}
|
||||
return grpcrand.Intn(100)+1 <= *a
|
||||
}
|
||||
|
||||
func canaryingSC(js string) string {
|
||||
if js == "" {
|
||||
return ""
|
||||
}
|
||||
var rcs []rawChoice
|
||||
err := json.Unmarshal([]byte(js), &rcs)
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err)
|
||||
return ""
|
||||
}
|
||||
cliHostname, err := os.Hostname()
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err)
|
||||
return ""
|
||||
}
|
||||
var sc string
|
||||
for _, c := range rcs {
|
||||
if !containsString(c.ClientLanguage, golang) ||
|
||||
!chosenByPercentage(c.Percentage) ||
|
||||
!containsString(c.ClientHostName, cliHostname) ||
|
||||
c.ServiceConfig == nil {
|
||||
continue
|
||||
}
|
||||
sc = string(*c.ServiceConfig)
|
||||
break
|
||||
}
|
||||
return sc
|
||||
}
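// Illustrative only (values are made up): the shape of TXT record data that
// canaryingSC consumes. The record must start with the grpc_config= attribute
// and carry a JSON list of choices matching rawChoice above.
//
//   grpc_config=[{"clientLanguage":["GO"],"percentage":50,"serviceConfig":{"loadBalancingPolicy":"round_robin"}}]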
|
|
@ -0,0 +1,35 @@
|
|||
// +build go1.6, !go1.8
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package dns
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var (
|
||||
lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) }
|
||||
lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
|
||||
return net.LookupSRV(service, proto, name)
|
||||
}
|
||||
lookupTXT = func(ctx context.Context, name string) ([]string, error) { return net.LookupTXT(name) }
|
||||
)
|
|
@ -0,0 +1,29 @@
|
|||
// +build go1.8
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package dns
|
||||
|
||||
import "net"
|
||||
|
||||
var (
|
||||
lookupHost = net.DefaultResolver.LookupHost
|
||||
lookupSRV = net.DefaultResolver.LookupSRV
|
||||
lookupTXT = net.DefaultResolver.LookupTXT
|
||||
)
|
|
@ -0,0 +1,57 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package passthrough implements a pass-through resolver. It sends the target
|
||||
// name without the scheme back to gRPC as the resolved address.
|
||||
package passthrough
|
||||
|
||||
import "google.golang.org/grpc/resolver"
|
||||
|
||||
const scheme = "passthrough"
|
||||
|
||||
type passthroughBuilder struct{}
|
||||
|
||||
func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
|
||||
r := &passthroughResolver{
|
||||
target: target,
|
||||
cc: cc,
|
||||
}
|
||||
r.start()
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (*passthroughBuilder) Scheme() string {
|
||||
return scheme
|
||||
}
|
||||
|
||||
type passthroughResolver struct {
|
||||
target resolver.Target
|
||||
cc resolver.ClientConn
|
||||
}
|
||||
|
||||
func (r *passthroughResolver) start() {
|
||||
r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}})
|
||||
}
|
||||
|
||||
func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
|
||||
|
||||
func (*passthroughResolver) Close() {}
|
||||
|
||||
func init() {
|
||||
resolver.Register(&passthroughBuilder{})
|
||||
}
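// Usage sketch (illustrative, not part of this change): since the resolver
// package uses "passthrough" as its default scheme, both dials below reach this
// resolver, which hands the address back to gRPC unchanged. Address and options
// are placeholders.
//
//   grpc.Dial("10.1.2.3:50051", grpc.WithInsecure())
//   grpc.Dial("passthrough:///10.1.2.3:50051", grpc.WithInsecure())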
|
|
@ -0,0 +1,158 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package resolver defines APIs for name resolution in gRPC.
|
||||
// All APIs in this package are experimental.
|
||||
package resolver
|
||||
|
||||
var (
|
||||
// m is a map from scheme to resolver builder.
|
||||
m = make(map[string]Builder)
|
||||
// defaultScheme is the default scheme to use.
|
||||
defaultScheme = "passthrough"
|
||||
)
|
||||
|
||||
// TODO(bar) install dns resolver in init(){}.
|
||||
|
||||
// Register registers the resolver builder to the resolver map. b.Scheme will be
|
||||
// used as the scheme registered with this builder.
|
||||
//
|
||||
// NOTE: this function must only be called during initialization time (i.e. in
|
||||
// an init() function), and is not thread-safe. If multiple Resolvers are
|
||||
// registered with the same name, the one registered last will take effect.
|
||||
func Register(b Builder) {
|
||||
m[b.Scheme()] = b
|
||||
}
|
||||
|
||||
// Get returns the resolver builder registered with the given scheme.
|
||||
//
|
||||
// If no builder is registered with the scheme, nil will be returned.
|
||||
func Get(scheme string) Builder {
|
||||
if b, ok := m[scheme]; ok {
|
||||
return b
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetDefaultScheme sets the default scheme that will be used. The default
|
||||
// default scheme is "passthrough".
|
||||
//
|
||||
// NOTE: this function must only be called during initialization time (i.e. in
|
||||
// an init() function), and is not thread-safe. The scheme set last overrides
|
||||
// previously set values.
|
||||
func SetDefaultScheme(scheme string) {
|
||||
defaultScheme = scheme
|
||||
}
|
||||
|
||||
// GetDefaultScheme gets the default scheme that will be used.
|
||||
func GetDefaultScheme() string {
|
||||
return defaultScheme
|
||||
}
|
||||
|
||||
// AddressType indicates the address type returned by name resolution.
|
||||
type AddressType uint8
|
||||
|
||||
const (
|
||||
// Backend indicates the address is for a backend server.
|
||||
Backend AddressType = iota
|
||||
// GRPCLB indicates the address is for a grpclb load balancer.
|
||||
GRPCLB
|
||||
)
|
||||
|
||||
// Address represents a server the client connects to.
|
||||
// This is the EXPERIMENTAL API and may be changed or extended in the future.
|
||||
type Address struct {
|
||||
// Addr is the server address on which a connection will be established.
|
||||
Addr string
|
||||
// Type is the type of this address.
|
||||
Type AddressType
|
||||
// ServerName is the name of this address.
|
||||
//
|
||||
// e.g. if Type is GRPCLB, ServerName should be the name of the remote load
|
||||
// balancer, not the name of the backend.
|
||||
ServerName string
|
||||
// Metadata is the information associated with Addr, which may be used
|
||||
// to make load balancing decision.
|
||||
Metadata interface{}
|
||||
}
|
||||
|
||||
// BuildOption includes additional information for the builder to create
|
||||
// the resolver.
|
||||
type BuildOption struct {
|
||||
// DisableServiceConfig indicates whether resolver should fetch service config data.
|
||||
DisableServiceConfig bool
|
||||
}
|
||||
|
||||
// ClientConn contains the callbacks for resolver to notify any updates
|
||||
// to the gRPC ClientConn.
|
||||
//
|
||||
// This interface is to be implemented by gRPC. Users should not need a
|
||||
// brand new implementation of this interface. For situations like
|
||||
// testing, the new implementation should embed this interface. This allows
|
||||
// gRPC to add new methods to this interface.
|
||||
type ClientConn interface {
|
||||
// NewAddress is called by resolver to notify ClientConn a new list
|
||||
// of resolved addresses.
|
||||
// The address list should be the complete list of resolved addresses.
|
||||
NewAddress(addresses []Address)
|
||||
// NewServiceConfig is called by resolver to notify ClientConn a new
|
||||
// service config. The service config should be provided as a json string.
|
||||
NewServiceConfig(serviceConfig string)
|
||||
}
|
||||
|
||||
// Target represents a target for gRPC, as specified in:
|
||||
// https://github.com/grpc/grpc/blob/master/doc/naming.md.
|
||||
type Target struct {
|
||||
Scheme string
|
||||
Authority string
|
||||
Endpoint string
|
||||
}
|
||||
|
||||
// Builder creates a resolver that will be used to watch name resolution updates.
|
||||
type Builder interface {
|
||||
// Build creates a new resolver for the given target.
|
||||
//
|
||||
// gRPC dial calls Build synchronously, and fails if the returned error is
|
||||
// not nil.
|
||||
Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error)
|
||||
// Scheme returns the scheme supported by this resolver.
|
||||
// Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
|
||||
Scheme() string
|
||||
}
|
||||
|
||||
// ResolveNowOption includes additional information for ResolveNow.
|
||||
type ResolveNowOption struct{}
|
||||
|
||||
// Resolver watches for the updates on the specified target.
|
||||
// Updates include address updates and service config updates.
|
||||
type Resolver interface {
|
||||
// ResolveNow will be called by gRPC to try to resolve the target name
|
||||
// again. It's just a hint, resolver can ignore this if it's not necessary.
|
||||
//
|
||||
// It could be called multiple times concurrently.
|
||||
ResolveNow(ResolveNowOption)
|
||||
// Close closes the resolver.
|
||||
Close()
|
||||
}
|
||||
|
||||
// UnregisterForTesting removes the resolver builder with the given scheme from the
|
||||
// resolver map.
|
||||
// This function is for testing only.
|
||||
func UnregisterForTesting(scheme string) {
|
||||
delete(m, scheme)
|
||||
}
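// Illustrative sketch (not part of this change): the smallest Builder/Resolver
// pair a test might register for a fixed address list. The "static" scheme and
// the address shown in the comment are placeholders.
type staticBuilder struct{ addrs []Address }

func (b *staticBuilder) Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error) {
	// Report the fixed address set once; there is nothing to watch afterwards.
	cc.NewAddress(b.addrs)
	return &staticResolver{}, nil
}

func (b *staticBuilder) Scheme() string { return "static" }

type staticResolver struct{}

func (*staticResolver) ResolveNow(ResolveNowOption) {} // nothing to refresh
func (*staticResolver) Close()                      {}

// A test would register it with, e.g.:
//   Register(&staticBuilder{addrs: []Address{{Addr: "127.0.0.1:50051"}}})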
|
|
@ -0,0 +1,158 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
// ccResolverWrapper is a wrapper on top of cc for resolvers.
|
||||
// It implements the resolver.ClientConn interface.
|
||||
type ccResolverWrapper struct {
|
||||
cc *ClientConn
|
||||
resolver resolver.Resolver
|
||||
addrCh chan []resolver.Address
|
||||
scCh chan string
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
// split2 returns the values from strings.SplitN(s, sep, 2).
|
||||
// If sep is not found, it returns ("", s, false) instead.
|
||||
func split2(s, sep string) (string, string, bool) {
|
||||
spl := strings.SplitN(s, sep, 2)
|
||||
if len(spl) < 2 {
|
||||
return "", "", false
|
||||
}
|
||||
return spl[0], spl[1], true
|
||||
}
|
||||
|
||||
// parseTarget splits target into a struct containing scheme, authority and
|
||||
// endpoint.
|
||||
//
|
||||
// If target is not a valid scheme://authority/endpoint, it returns {Endpoint:
|
||||
// target}.
|
||||
func parseTarget(target string) (ret resolver.Target) {
|
||||
var ok bool
|
||||
ret.Scheme, ret.Endpoint, ok = split2(target, "://")
|
||||
if !ok {
|
||||
return resolver.Target{Endpoint: target}
|
||||
}
|
||||
ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
|
||||
if !ok {
|
||||
return resolver.Target{Endpoint: target}
|
||||
}
|
||||
return ret
|
||||
}
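// Illustrative only: how parseTarget splits a few representative targets
// (hosts are placeholders; see https://github.com/grpc/grpc/blob/master/doc/naming.md).
//
//   parseTarget("dns://8.8.8.8/foo.example:443") => {Scheme: "dns", Authority: "8.8.8.8", Endpoint: "foo.example:443"}
//   parseTarget("dns:///foo.example:443")        => {Scheme: "dns", Authority: "", Endpoint: "foo.example:443"}
//   parseTarget("foo.example:443")               => {Scheme: "", Authority: "", Endpoint: "foo.example:443"}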
|
||||
|
||||
// newCCResolverWrapper parses cc.target for scheme and gets the resolver
|
||||
// builder for this scheme and builds the resolver. The monitoring goroutine
|
||||
// for it is not started yet and can be created by calling start().
|
||||
//
|
||||
// If withResolverBuilder dial option is set, the specified resolver will be
|
||||
// used instead.
|
||||
func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
|
||||
rb := cc.dopts.resolverBuilder
|
||||
if rb == nil {
|
||||
return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme)
|
||||
}
|
||||
|
||||
ccr := &ccResolverWrapper{
|
||||
cc: cc,
|
||||
addrCh: make(chan []resolver.Address, 1),
|
||||
scCh: make(chan string, 1),
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
|
||||
var err error
|
||||
ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ccr, nil
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) start() {
|
||||
go ccr.watcher()
|
||||
}
|
||||
|
||||
// watcher processes address updates and service config updates sequentially.
|
||||
// Otherwise, we need to resolve possible races between address and service
|
||||
// config (e.g. they specify different balancer types).
|
||||
func (ccr *ccResolverWrapper) watcher() {
|
||||
for {
|
||||
select {
|
||||
case <-ccr.done:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
select {
|
||||
case addrs := <-ccr.addrCh:
|
||||
select {
|
||||
case <-ccr.done:
|
||||
return
|
||||
default:
|
||||
}
|
||||
grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
|
||||
ccr.cc.handleResolvedAddrs(addrs, nil)
|
||||
case sc := <-ccr.scCh:
|
||||
select {
|
||||
case <-ccr.done:
|
||||
return
|
||||
default:
|
||||
}
|
||||
grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
|
||||
ccr.cc.handleServiceConfig(sc)
|
||||
case <-ccr.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
|
||||
ccr.resolver.ResolveNow(o)
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) close() {
|
||||
ccr.resolver.Close()
|
||||
close(ccr.done)
|
||||
}
|
||||
|
||||
// NewAddress is called by the resolver implementation to send addresses to gRPC.
|
||||
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
|
||||
select {
|
||||
case <-ccr.addrCh:
|
||||
default:
|
||||
}
|
||||
ccr.addrCh <- addrs
|
||||
}
|
||||
|
||||
// NewServiceConfig is called by the resolver implementation to send service
|
||||
// configs to gRPC.
|
||||
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
|
||||
select {
|
||||
case <-ccr.scCh:
|
||||
default:
|
||||
}
|
||||
ccr.scCh <- sc
|
||||
}
|
|
@ -22,23 +22,30 @@ import (
|
|||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/encoding"
|
||||
"google.golang.org/grpc/encoding/proto"
|
||||
"google.golang.org/grpc/internal/transport"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/stats"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
// Compressor defines the interface gRPC uses to compress a message.
|
||||
//
|
||||
// Deprecated: use package encoding.
|
||||
type Compressor interface {
|
||||
// Do compresses p into w.
|
||||
Do(w io.Writer, p []byte) error
|
||||
|
@ -51,18 +58,39 @@ type gzipCompressor struct {
|
|||
}
|
||||
|
||||
// NewGZIPCompressor creates a Compressor based on GZIP.
|
||||
//
|
||||
// Deprecated: use package encoding/gzip.
|
||||
func NewGZIPCompressor() Compressor {
|
||||
c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression)
|
||||
return c
|
||||
}
|
||||
|
||||
// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead
|
||||
// of assuming DefaultCompression.
|
||||
//
|
||||
// The error returned will be nil if the level is valid.
|
||||
//
|
||||
// Deprecated: use package encoding/gzip.
|
||||
func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
|
||||
if level < gzip.DefaultCompression || level > gzip.BestCompression {
|
||||
return nil, fmt.Errorf("grpc: invalid compression level: %d", level)
|
||||
}
|
||||
return &gzipCompressor{
|
||||
pool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
return gzip.NewWriter(ioutil.Discard)
|
||||
},
|
||||
},
|
||||
w, err := gzip.NewWriterLevel(ioutil.Discard, level)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return w
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
|
||||
z := c.pool.Get().(*gzip.Writer)
|
||||
defer c.pool.Put(z)
|
||||
z.Reset(w)
|
||||
if _, err := z.Write(p); err != nil {
|
||||
return err
|
||||
|
@ -75,6 +103,8 @@ func (c *gzipCompressor) Type() string {
|
|||
}
|
||||
|
||||
// Decompressor defines the interface gRPC uses to decompress a message.
|
||||
//
|
||||
// Deprecated: use package encoding.
|
||||
type Decompressor interface {
|
||||
// Do reads the data from r and uncompresses it.
|
||||
Do(r io.Reader) ([]byte, error)
|
||||
|
@ -87,6 +117,8 @@ type gzipDecompressor struct {
|
|||
}
|
||||
|
||||
// NewGZIPDecompressor creates a Decompressor based on GZIP.
|
||||
//
|
||||
// Deprecated: use package encoding/gzip.
|
||||
func NewGZIPDecompressor() Decompressor {
|
||||
return &gzipDecompressor{}
|
||||
}
|
||||
|
@ -121,17 +153,25 @@ func (d *gzipDecompressor) Type() string {
|
|||
|
||||
// callInfo contains all related configuration and information about an RPC.
|
||||
type callInfo struct {
|
||||
compressorType string
|
||||
failFast bool
|
||||
headerMD metadata.MD
|
||||
trailerMD metadata.MD
|
||||
peer *peer.Peer
|
||||
stream *clientStream
|
||||
traceInfo traceInfo // in trace.go
|
||||
maxReceiveMessageSize *int
|
||||
maxSendMessageSize *int
|
||||
creds credentials.PerRPCCredentials
|
||||
contentSubtype string
|
||||
codec baseCodec
|
||||
disableRetry bool
|
||||
maxRetryRPCBufferSize int
|
||||
}
|
||||
|
||||
var defaultCallInfo = callInfo{failFast: true}
|
||||
func defaultCallInfo() *callInfo {
|
||||
return &callInfo{
|
||||
failFast: true,
|
||||
maxRetryRPCBufferSize: 256 * 1024, // 256KB
|
||||
}
|
||||
}
|
||||
|
||||
// CallOption configures a Call before it starts or extracts information from
|
||||
// a Call after it completes.
|
||||
|
@ -153,87 +193,260 @@ type EmptyCallOption struct{}
|
|||
func (EmptyCallOption) before(*callInfo) error { return nil }
|
||||
func (EmptyCallOption) after(*callInfo) {}
|
||||
|
||||
type beforeCall func(c *callInfo) error
|
||||
|
||||
func (o beforeCall) before(c *callInfo) error { return o(c) }
|
||||
func (o beforeCall) after(c *callInfo) {}
|
||||
|
||||
type afterCall func(c *callInfo)
|
||||
|
||||
func (o afterCall) before(c *callInfo) error { return nil }
|
||||
func (o afterCall) after(c *callInfo) { o(c) }
|
||||
|
||||
// Header returns a CallOptions that retrieves the header metadata
|
||||
// for a unary RPC.
|
||||
func Header(md *metadata.MD) CallOption {
|
||||
return afterCall(func(c *callInfo) {
|
||||
*md = c.headerMD
|
||||
})
|
||||
return HeaderCallOption{HeaderAddr: md}
|
||||
}
|
||||
|
||||
// HeaderCallOption is a CallOption for collecting response header metadata.
|
||||
// The metadata field will be populated *after* the RPC completes.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type HeaderCallOption struct {
|
||||
HeaderAddr *metadata.MD
|
||||
}
|
||||
|
||||
func (o HeaderCallOption) before(c *callInfo) error { return nil }
|
||||
func (o HeaderCallOption) after(c *callInfo) {
|
||||
if c.stream != nil {
|
||||
*o.HeaderAddr, _ = c.stream.Header()
|
||||
}
|
||||
}
|
||||
|
||||
// Trailer returns a CallOptions that retrieves the trailer metadata
|
||||
// for a unary RPC.
|
||||
func Trailer(md *metadata.MD) CallOption {
|
||||
return afterCall(func(c *callInfo) {
|
||||
*md = c.trailerMD
|
||||
})
|
||||
return TrailerCallOption{TrailerAddr: md}
|
||||
}
|
||||
|
||||
// Peer returns a CallOption that retrieves peer information for a
|
||||
// unary RPC.
|
||||
func Peer(peer *peer.Peer) CallOption {
|
||||
return afterCall(func(c *callInfo) {
|
||||
if c.peer != nil {
|
||||
*peer = *c.peer
|
||||
// TrailerCallOption is a CallOption for collecting response trailer metadata.
|
||||
// The metadata field will be populated *after* the RPC completes.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type TrailerCallOption struct {
|
||||
TrailerAddr *metadata.MD
|
||||
}
|
||||
|
||||
func (o TrailerCallOption) before(c *callInfo) error { return nil }
|
||||
func (o TrailerCallOption) after(c *callInfo) {
|
||||
if c.stream != nil {
|
||||
*o.TrailerAddr = c.stream.Trailer()
|
||||
}
|
||||
}
|
||||
|
||||
// Peer returns a CallOption that retrieves peer information for a unary RPC.
|
||||
// The peer field will be populated *after* the RPC completes.
|
||||
func Peer(p *peer.Peer) CallOption {
|
||||
return PeerCallOption{PeerAddr: p}
|
||||
}
|
||||
|
||||
// PeerCallOption is a CallOption for collecting the identity of the remote
|
||||
// peer. The peer field will be populated *after* the RPC completes.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type PeerCallOption struct {
|
||||
PeerAddr *peer.Peer
|
||||
}
|
||||
|
||||
func (o PeerCallOption) before(c *callInfo) error { return nil }
|
||||
func (o PeerCallOption) after(c *callInfo) {
|
||||
if c.stream != nil {
|
||||
if x, ok := peer.FromContext(c.stream.Context()); ok {
|
||||
*o.PeerAddr = *x
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// FailFast configures the action to take when an RPC is attempted on broken
// connections or unreachable servers. If failFast is true, the RPC will fail
// immediately. Otherwise, the RPC client will block the call until a
// connection is available (or the call is canceled or times out) and will
// retry the call if it fails due to a transient error. gRPC will not retry if
// data was written to the wire unless the server indicates it did not process
// the data. Please refer to
// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
//
// By default, RPCs are "Fail Fast" (failFast is true).
func FailFast(failFast bool) CallOption {
	return FailFastCallOption{FailFast: failFast}
}
|
||||
|
||||
// FailFastCallOption is a CallOption for indicating whether an RPC should fail
|
||||
// fast or not.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type FailFastCallOption struct {
|
||||
FailFast bool
|
||||
}
|
||||
|
||||
func (o FailFastCallOption) before(c *callInfo) error {
|
||||
c.failFast = o.FailFast
|
||||
return nil
|
||||
}
|
||||
func (o FailFastCallOption) after(c *callInfo) {}
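// Example (illustrative sketch, not part of this diff): disabling fail-fast so
// a call waits for a ready connection instead of failing immediately. The stub
// method is a hypothetical placeholder and the deadline value is arbitrary.
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	resp, err := client.SayHello(ctx, req, grpc.FailFast(false))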
|
||||
|
||||
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
|
||||
func MaxCallRecvMsgSize(s int) CallOption {
	return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s}
}
|
||||
|
||||
// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
|
||||
// size the client can receive.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type MaxRecvMsgSizeCallOption struct {
|
||||
MaxRecvMsgSize int
|
||||
}
|
||||
|
||||
func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
|
||||
c.maxReceiveMessageSize = &o.MaxRecvMsgSize
|
||||
return nil
|
||||
}
|
||||
func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {}
|
||||
|
||||
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
|
||||
func MaxCallSendMsgSize(s int) CallOption {
	return MaxSendMsgSizeCallOption{MaxSendMsgSize: s}
}
|
||||
|
||||
// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
|
||||
// size the client can send.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type MaxSendMsgSizeCallOption struct {
|
||||
MaxSendMsgSize int
|
||||
}
|
||||
|
||||
func (o MaxSendMsgSizeCallOption) before(c *callInfo) error {
|
||||
c.maxSendMessageSize = &o.MaxSendMsgSize
|
||||
return nil
|
||||
}
|
||||
func (o MaxSendMsgSizeCallOption) after(c *callInfo) {}
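// Example (illustrative sketch, not part of this diff): raising both per-call
// message size limits for a single large exchange; the 16 MiB figure is
// arbitrary and the stub method is a hypothetical placeholder.
//
//	resp, err := client.SayHello(ctx, req,
//		grpc.MaxCallRecvMsgSize(16*1024*1024),
//		grpc.MaxCallSendMsgSize(16*1024*1024),
//	)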
|
||||
|
||||
// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
|
||||
// for a call.
|
||||
func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
	return PerRPCCredsCallOption{Creds: creds}
}
|
||||
|
||||
// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
|
||||
// credentials to use for the call.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type PerRPCCredsCallOption struct {
|
||||
Creds credentials.PerRPCCredentials
|
||||
}
|
||||
|
||||
func (o PerRPCCredsCallOption) before(c *callInfo) error {
|
||||
c.creds = o.Creds
|
||||
return nil
|
||||
}
|
||||
func (o PerRPCCredsCallOption) after(c *callInfo) {}
|
||||
|
||||
// UseCompressor returns a CallOption which sets the compressor used when
|
||||
// sending the request. If WithCompressor is also set, UseCompressor has
|
||||
// higher priority.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
func UseCompressor(name string) CallOption {
|
||||
return CompressorCallOption{CompressorType: name}
|
||||
}
|
||||
|
||||
// CompressorCallOption is a CallOption that indicates the compressor to use.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type CompressorCallOption struct {
|
||||
CompressorType string
|
||||
}
|
||||
|
||||
func (o CompressorCallOption) before(c *callInfo) error {
|
||||
c.compressorType = o.CompressorType
|
||||
return nil
|
||||
}
|
||||
func (o CompressorCallOption) after(c *callInfo) {}
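// Example (illustrative sketch, not part of this diff): compressing one call
// with the registered gzip compressor. Importing
// "google.golang.org/grpc/encoding/gzip" registers it; the stub method is a
// hypothetical placeholder.
//
//	resp, err := client.SayHello(ctx, req, grpc.UseCompressor(gzip.Name))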
|
||||
|
||||
// CallContentSubtype returns a CallOption that will set the content-subtype
|
||||
// for a call. For example, if content-subtype is "json", the Content-Type over
|
||||
// the wire will be "application/grpc+json". The content-subtype is converted
|
||||
// to lowercase before being included in Content-Type. See Content-Type on
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
|
||||
// more details.
|
||||
//
|
||||
// If CallCustomCodec is not also used, the content-subtype will be used to
|
||||
// look up the Codec to use in the registry controlled by RegisterCodec. See
|
||||
// the documentation on RegisterCodec for details on registration. The lookup
|
||||
// of content-subtype is case-insensitive. If no such Codec is found, the call
|
||||
// will result in an error with code codes.Internal.
|
||||
//
|
||||
// If CallCustomCodec is also used, that Codec will be used for all request and
|
||||
// response messages, with the content-subtype set to the given contentSubtype
|
||||
// here for requests.
|
||||
func CallContentSubtype(contentSubtype string) CallOption {
|
||||
return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)}
|
||||
}
|
||||
|
||||
// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
|
||||
// used for marshaling messages.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type ContentSubtypeCallOption struct {
|
||||
ContentSubtype string
|
||||
}
|
||||
|
||||
func (o ContentSubtypeCallOption) before(c *callInfo) error {
|
||||
c.contentSubtype = o.ContentSubtype
|
||||
return nil
|
||||
}
|
||||
func (o ContentSubtypeCallOption) after(c *callInfo) {}
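// Example (illustrative sketch, not part of this diff): selecting a non-proto
// codec for one call. It assumes a codec whose Name() is "json" was registered
// beforehand with encoding.RegisterCodec; the codec itself and the stub method
// are hypothetical.
//
//	// Content-Type on the wire becomes "application/grpc+json".
//	resp, err := client.SayHello(ctx, req, grpc.CallContentSubtype("json"))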
|
||||
|
||||
// CallCustomCodec returns a CallOption that will set the given Codec to be
|
||||
// used for all request and response messages for a call. The result of calling
|
||||
// String() will be used as the content-subtype in a case-insensitive manner.
|
||||
//
|
||||
// See Content-Type on
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
|
||||
// more details. Also see the documentation on RegisterCodec and
|
||||
// CallContentSubtype for more details on the interaction between Codec and
|
||||
// content-subtype.
|
||||
//
|
||||
// This function is provided for advanced users; prefer to use only
|
||||
// CallContentSubtype to select a registered codec instead.
|
||||
func CallCustomCodec(codec Codec) CallOption {
|
||||
return CustomCodecCallOption{Codec: codec}
|
||||
}
|
||||
|
||||
// CustomCodecCallOption is a CallOption that indicates the codec used for
|
||||
// marshaling messages.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type CustomCodecCallOption struct {
|
||||
Codec Codec
|
||||
}
|
||||
|
||||
func (o CustomCodecCallOption) before(c *callInfo) error {
|
||||
c.codec = o.Codec
|
||||
return nil
|
||||
}
|
||||
func (o CustomCodecCallOption) after(c *callInfo) {}
|
||||
|
||||
// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
|
||||
// used for buffering this RPC's requests for retry purposes.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
func MaxRetryRPCBufferSize(bytes int) CallOption {
|
||||
return MaxRetryRPCBufferSizeCallOption{bytes}
|
||||
}
|
||||
|
||||
// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
|
||||
// memory to be used for caching this RPC for retry purposes.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type MaxRetryRPCBufferSizeCallOption struct {
|
||||
MaxRetryRPCBufferSize int
|
||||
}
|
||||
|
||||
func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error {
|
||||
c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
|
||||
return nil
|
||||
}
|
||||
func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {}
|
||||
|
||||
// The format of the payload: compressed or not?
|
||||
type payloadFormat uint8
|
||||
|
||||
const (
	compressionNone payloadFormat = 0 // no compression
	compressionMade payloadFormat = 1 // compressed
)
|
||||
|
||||
// parser reads complete gRPC messages from the underlying reader.
|
||||
|
@ -243,8 +456,8 @@ type parser struct {
|
|||
// error types.
|
||||
r io.Reader
|
||||
|
||||
	// The header of a gRPC message. Find more detail at
	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
|
||||
header [5]byte
|
||||
}
|
||||
|
||||
|
@ -257,7 +470,7 @@ type parser struct {
|
|||
// * io.EOF, when no messages remain
|
||||
// * io.ErrUnexpectedEOF
|
||||
// * of type transport.ConnectionError
// * an error from the status package
|
||||
// No other error values or types must be returned, which also means
|
||||
// that the underlying io.Reader must not return an incompatible
|
||||
// error.
|
||||
|
@ -272,8 +485,11 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
|
|||
if length == 0 {
|
||||
return pf, nil, nil
|
||||
}
|
||||
	if int64(length) > int64(maxInt) {
		return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
	}
	if int(length) > maxReceiveMessageSize {
		return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
	}
|
||||
// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
|
||||
// of making it for each message:
|
||||
|
@ -287,77 +503,104 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
|
|||
return pf, msg, nil
|
||||
}
|
||||
|
||||
// encode serializes msg and returns a buffer containing the message, or an
// error if it is too large to be transmitted by grpc. If msg is nil, it
// generates an empty message.
func encode(c baseCodec, msg interface{}) ([]byte, error) {
	if msg == nil { // NOTE: typed nils will not be caught by this check
		return nil, nil
	}
	b, err := c.Marshal(msg)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
	}
	if uint(len(b)) > math.MaxUint32 {
		return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
	}
	return b, nil
}

// compress returns the input bytes compressed by compressor or cp. If both
// compressors are nil, returns nil.
//
// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
	if compressor == nil && cp == nil {
		return nil, nil
	}
	wrapErr := func(err error) error {
		return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
	}
	cbuf := &bytes.Buffer{}
	if compressor != nil {
		z, _ := compressor.Compress(cbuf)
		if _, err := z.Write(in); err != nil {
			return nil, wrapErr(err)
		}
		if err := z.Close(); err != nil {
			return nil, wrapErr(err)
		}
	} else {
		if err := cp.Do(cbuf, in); err != nil {
			return nil, wrapErr(err)
		}
	}
	return cbuf.Bytes(), nil
}

const (
	payloadLen = 1
	sizeLen    = 4
	headerLen  = payloadLen + sizeLen
)

// msgHeader returns a 5-byte header for the message being transmitted and the
// payload, which is compData if non-nil or data otherwise.
func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
	hdr = make([]byte, headerLen)
	if compData != nil {
		hdr[0] = byte(compressionMade)
		data = compData
	} else {
		hdr[0] = byte(compressionNone)
	}

	// Write length of payload into buf
	binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
	return hdr, data
}
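// Example (illustrative sketch, not part of this diff): the framing produced
// by msgHeader for an uncompressed 3-byte message.
//
//	hdr, payload := msgHeader([]byte{0x01, 0x02, 0x03}, nil)
//	// hdr[0]   == byte(compressionNone)   (flag byte)
//	// hdr[1:5] == 0x00 0x00 0x00 0x03     (big-endian payload length)
//	// payload  == the original data, since no compressed bytes were supplied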
|
||||
|
||||
func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload {
	return &stats.OutPayload{
		Client:     client,
		Payload:    msg,
		Data:       data,
		Length:     len(data),
		WireLength: len(payload) + headerLen,
		SentTime:   t,
	}
}

func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
	switch pf {
	case compressionNone:
	case compressionMade:
		if recvCompress == "" || recvCompress == encoding.Identity {
			return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
		}
		if !haveCompressor {
			return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
		}
	default:
		return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
	}
	return nil
}
|
||||
|
||||
// For the two compressor parameters, both should not be set, but if they are,
// dc takes precedence over compressor.
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error {
|
||||
pf, d, err := p.recvMsg(maxReceiveMessageSize)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -365,22 +608,37 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
|
|||
if inPayload != nil {
|
||||
inPayload.WireLength = len(d)
|
||||
}
|
||||
	if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
		return st.Err()
	}
|
||||
|
||||
if pf == compressionMade {
|
||||
// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
|
||||
// use this decompressor as the default.
|
||||
if dc != nil {
|
||||
d, err = dc.Do(bytes.NewReader(d))
|
||||
if err != nil {
|
||||
				return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
}
|
||||
} else {
|
||||
dcReader, err := compressor.Decompress(bytes.NewReader(d))
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
}
|
||||
d, err = ioutil.ReadAll(dcReader)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(d) > maxReceiveMessageSize {
|
||||
// TODO: Revisit the error code. Currently keep it consistent with java
|
||||
// implementation.
|
||||
		return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
|
||||
}
|
||||
if err := c.Unmarshal(d, m); err != nil {
|
||||
		return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
|
||||
}
|
||||
if inPayload != nil {
|
||||
inPayload.RecvTime = time.Now()
|
||||
|
@ -393,14 +651,13 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
|
|||
}
|
||||
|
||||
type rpcInfo struct {
|
||||
bytesSent bool
|
||||
bytesReceived bool
|
||||
failfast bool
|
||||
}
|
||||
|
||||
type rpcInfoContextKey struct{}
|
||||
|
||||
func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context {
	return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast})
|
||||
}
|
||||
|
||||
func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
|
||||
|
@ -408,17 +665,10 @@ func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
|
|||
return
|
||||
}
|
||||
|
||||
func updateRPCInfoInContext(ctx context.Context, s rpcInfo) {
|
||||
if ss, ok := rpcInfoFromContext(ctx); ok {
|
||||
*ss = s
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Code returns the error code for err if it was produced by the rpc system.
|
||||
// Otherwise, it returns codes.Unknown.
|
||||
//
|
||||
// Deprecated: use status.FromError and Code method instead.
|
||||
func Code(err error) codes.Code {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
return s.Code()
|
||||
|
@ -429,7 +679,7 @@ func Code(err error) codes.Code {
|
|||
// ErrorDesc returns the error description of err if it was produced by the rpc system.
|
||||
// Otherwise, it returns err.Error() or empty string when err is nil.
|
||||
//
|
||||
// Deprecated: use status.FromError and Message method instead.
|
||||
func ErrorDesc(err error) string {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
return s.Message()
|
||||
|
@ -440,85 +690,78 @@ func ErrorDesc(err error) string {
|
|||
// Errorf returns an error containing an error code and a description;
|
||||
// Errorf returns nil if c is OK.
|
||||
//
|
||||
// Deprecated: use status.Errorf instead.
|
||||
func Errorf(c codes.Code, format string, a ...interface{}) error {
|
||||
return status.Errorf(c, format, a...)
|
||||
}
|
||||
|
||||
// setCallInfoCodec should only be called after CallOptions have been applied.
func setCallInfoCodec(c *callInfo) error {
	if c.codec != nil {
		// codec was already set by a CallOption; use it.
		return nil
	}

	if c.contentSubtype == "" {
		// No codec specified in CallOptions; use proto by default.
		c.codec = encoding.GetCodec(proto.Name)
		return nil
	}

	// c.contentSubtype is already lowercased in CallContentSubtype
	c.codec = encoding.GetCodec(c.contentSubtype)
	if c.codec == nil {
		return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
	}
	return nil
}

// parseDialTarget returns the network and address to pass to dialer
func parseDialTarget(target string) (net string, addr string) {
	net = "tcp"

	m1 := strings.Index(target, ":")
	m2 := strings.Index(target, ":/")

	// handle unix:addr which will fail with url.Parse
	if m1 >= 0 && m2 < 0 {
		if n := target[0:m1]; n == "unix" {
			net = n
			addr = target[m1+1:]
			return net, addr
		}
	}
	if m2 >= 0 {
		t, err := url.Parse(target)
		if err != nil {
			return net, target
		}
		scheme := t.Scheme
		addr = t.Path
		if scheme == "unix" {
			net = scheme
			if addr == "" {
				addr = t.Host
			}
			return net, addr
		}
	}

	return net, target
}

// The SupportPackageIsVersion variables are referenced from generated protocol
// buffer files to ensure compatibility with the gRPC version used. The latest
// support package version is 5.
//
// Older versions are kept for compatibility. They may be removed if
// compatibility cannot be maintained.
//
// These constants should not be referenced from any other code.
const (
	SupportPackageIsVersion3 = true
	SupportPackageIsVersion4 = true
	SupportPackageIsVersion5 = true
)

const grpcUA = "grpc-go/" + Version
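// Example (illustrative sketch, not part of this diff): how parseDialTarget
// splits a few representative targets, per the logic above (the addresses are
// made up).
//
//	parseDialTarget("localhost:50051")          // -> ("tcp", "localhost:50051")
//	parseDialTarget("unix:/var/run/app.sock")   // -> ("unix", "/var/run/app.sock")
//	parseDialTarget("unix:///var/run/app.sock") // -> ("unix", "/var/run/app.sock")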
|
||||
|
|
File diff suppressed because it is too large
|
@ -0,0 +1,358 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
const maxInt = int(^uint(0) >> 1)
|
||||
|
||||
// MethodConfig defines the configuration recommended by the service providers for a
|
||||
// particular method.
|
||||
//
|
||||
// Deprecated: Users should not use this struct. Service config should be received
|
||||
// through name resolver, as specified here
|
||||
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
|
||||
type MethodConfig struct {
|
||||
// WaitForReady indicates whether RPCs sent to this method should wait until
|
||||
// the connection is ready by default (!failfast). The value specified via the
|
||||
// gRPC client API will override the value set here.
|
||||
WaitForReady *bool
|
||||
// Timeout is the default timeout for RPCs sent to this method. The actual
|
||||
// deadline used will be the minimum of the value specified here and the value
|
||||
// set by the application via the gRPC client API. If either one is not set,
|
||||
// then the other will be used. If neither is set, then the RPC has no deadline.
|
||||
Timeout *time.Duration
|
||||
// MaxReqSize is the maximum allowed payload size for an individual request in a
|
||||
// stream (client->server) in bytes. The size which is measured is the serialized
|
||||
// payload after per-message compression (but before stream compression) in bytes.
|
||||
// The actual value used is the minimum of the value specified here and the value set
|
||||
// by the application via the gRPC client API. If either one is not set, then the other
|
||||
// will be used. If neither is set, then the built-in default is used.
|
||||
MaxReqSize *int
|
||||
// MaxRespSize is the maximum allowed payload size for an individual response in a
|
||||
// stream (server->client) in bytes.
|
||||
MaxRespSize *int
|
||||
// RetryPolicy configures retry options for the method.
|
||||
retryPolicy *retryPolicy
|
||||
}
|
||||
|
||||
// ServiceConfig is provided by the service provider and contains parameters for how
|
||||
// clients that connect to the service should behave.
|
||||
//
|
||||
// Deprecated: Users should not use this struct. Service config should be received
|
||||
// through name resolver, as specified here
|
||||
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
|
||||
type ServiceConfig struct {
|
||||
// LB is the load balancer the service providers recommends. The balancer specified
|
||||
// via grpc.WithBalancer will override this.
|
||||
LB *string
|
||||
|
||||
// Methods contains a map for the methods in this service. If there is an
|
||||
// exact match for a method (i.e. /service/method) in the map, use the
|
||||
// corresponding MethodConfig. If there's no exact match, look for the
|
||||
// default config for the service (/service/) and use the corresponding
|
||||
// MethodConfig if it exists. Otherwise, the method has no MethodConfig to
|
||||
// use.
|
||||
Methods map[string]MethodConfig
|
||||
|
||||
// If a retryThrottlingPolicy is provided, gRPC will automatically throttle
|
||||
// retry attempts and hedged RPCs when the client’s ratio of failures to
|
||||
// successes exceeds a threshold.
|
||||
//
|
||||
// For each server name, the gRPC client will maintain a token_count which is
|
||||
// initially set to maxTokens, and can take values between 0 and maxTokens.
|
||||
//
|
||||
// Every outgoing RPC (regardless of service or method invoked) will change
|
||||
// token_count as follows:
|
||||
//
|
||||
// - Every failed RPC will decrement the token_count by 1.
|
||||
// - Every successful RPC will increment the token_count by tokenRatio.
|
||||
//
|
||||
// If token_count is less than or equal to maxTokens / 2, then RPCs will not
|
||||
// be retried and hedged RPCs will not be sent.
|
||||
retryThrottling *retryThrottlingPolicy
|
||||
}
|
||||
|
||||
// retryPolicy defines the go-native version of the retry policy defined by the
|
||||
// service config here:
|
||||
// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
|
||||
type retryPolicy struct {
|
||||
// MaxAttempts is the maximum number of attempts, including the original RPC.
|
||||
//
|
||||
// This field is required and must be two or greater.
|
||||
maxAttempts int
|
||||
|
||||
// Exponential backoff parameters. The initial retry attempt will occur at
|
||||
// random(0, initialBackoffMS). In general, the nth attempt will occur at
|
||||
// random(0,
|
||||
// min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)).
|
||||
//
|
||||
// These fields are required and must be greater than zero.
|
||||
initialBackoff time.Duration
|
||||
maxBackoff time.Duration
|
||||
backoffMultiplier float64
|
||||
|
||||
// The set of status codes which may be retried.
|
||||
//
|
||||
// Status codes are specified as strings, e.g., "UNAVAILABLE".
|
||||
//
|
||||
// This field is required and must be non-empty.
|
||||
// Note: a set is used to store this for easy lookup.
|
||||
retryableStatusCodes map[codes.Code]bool
|
||||
}
|
||||
|
||||
type jsonRetryPolicy struct {
|
||||
MaxAttempts int
|
||||
InitialBackoff string
|
||||
MaxBackoff string
|
||||
BackoffMultiplier float64
|
||||
RetryableStatusCodes []codes.Code
|
||||
}
|
||||
|
||||
// retryThrottlingPolicy defines the go-native version of the retry throttling
|
||||
// policy defined by the service config here:
|
||||
// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
|
||||
type retryThrottlingPolicy struct {
|
||||
// The number of tokens starts at maxTokens. The token_count will always be
|
||||
// between 0 and maxTokens.
|
||||
//
|
||||
// This field is required and must be greater than zero.
|
||||
MaxTokens float64
|
||||
// The amount of tokens to add on each successful RPC. Typically this will
|
||||
// be some number between 0 and 1, e.g., 0.1.
|
||||
//
|
||||
// This field is required and must be greater than zero. Up to 3 decimal
|
||||
// places are supported.
|
||||
TokenRatio float64
|
||||
}
|
||||
|
||||
func parseDuration(s *string) (*time.Duration, error) {
|
||||
if s == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if !strings.HasSuffix(*s, "s") {
|
||||
return nil, fmt.Errorf("malformed duration %q", *s)
|
||||
}
|
||||
ss := strings.SplitN((*s)[:len(*s)-1], ".", 3)
|
||||
if len(ss) > 2 {
|
||||
return nil, fmt.Errorf("malformed duration %q", *s)
|
||||
}
|
||||
// hasDigits is set if either the whole or fractional part of the number is
|
||||
// present, since both are optional but one is required.
|
||||
hasDigits := false
|
||||
var d time.Duration
|
||||
if len(ss[0]) > 0 {
|
||||
i, err := strconv.ParseInt(ss[0], 10, 32)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
|
||||
}
|
||||
d = time.Duration(i) * time.Second
|
||||
hasDigits = true
|
||||
}
|
||||
if len(ss) == 2 && len(ss[1]) > 0 {
|
||||
if len(ss[1]) > 9 {
|
||||
return nil, fmt.Errorf("malformed duration %q", *s)
|
||||
}
|
||||
f, err := strconv.ParseInt(ss[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
|
||||
}
|
||||
for i := 9; i > len(ss[1]); i-- {
|
||||
f *= 10
|
||||
}
|
||||
d += time.Duration(f)
|
||||
hasDigits = true
|
||||
}
|
||||
if !hasDigits {
|
||||
return nil, fmt.Errorf("malformed duration %q", *s)
|
||||
}
|
||||
|
||||
return &d, nil
|
||||
}
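// Example (illustrative sketch, not part of this diff): the duration strings
// accepted by parseDuration are decimal seconds with a trailing "s", as used
// in service config JSON.
//
//	"1s"   -> 1 * time.Second
//	"0.5s" -> 500 * time.Millisecond
//	"1.5s" -> 1500 * time.Millisecond
//	"1"    -> error (missing "s" suffix)
//	".s"   -> error (no digits at all)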
|
||||
|
||||
type jsonName struct {
|
||||
Service *string
|
||||
Method *string
|
||||
}
|
||||
|
||||
func (j jsonName) generatePath() (string, bool) {
|
||||
if j.Service == nil {
|
||||
return "", false
|
||||
}
|
||||
res := "/" + *j.Service + "/"
|
||||
if j.Method != nil {
|
||||
res += *j.Method
|
||||
}
|
||||
return res, true
|
||||
}
|
||||
|
||||
// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
|
||||
type jsonMC struct {
|
||||
Name *[]jsonName
|
||||
WaitForReady *bool
|
||||
Timeout *string
|
||||
MaxRequestMessageBytes *int64
|
||||
MaxResponseMessageBytes *int64
|
||||
RetryPolicy *jsonRetryPolicy
|
||||
}
|
||||
|
||||
// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
|
||||
type jsonSC struct {
|
||||
LoadBalancingPolicy *string
|
||||
MethodConfig *[]jsonMC
|
||||
RetryThrottling *retryThrottlingPolicy
|
||||
}
|
||||
|
||||
func parseServiceConfig(js string) (ServiceConfig, error) {
|
||||
var rsc jsonSC
|
||||
err := json.Unmarshal([]byte(js), &rsc)
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||||
return ServiceConfig{}, err
|
||||
}
|
||||
sc := ServiceConfig{
|
||||
LB: rsc.LoadBalancingPolicy,
|
||||
Methods: make(map[string]MethodConfig),
|
||||
retryThrottling: rsc.RetryThrottling,
|
||||
}
|
||||
if rsc.MethodConfig == nil {
|
||||
return sc, nil
|
||||
}
|
||||
|
||||
for _, m := range *rsc.MethodConfig {
|
||||
if m.Name == nil {
|
||||
continue
|
||||
}
|
||||
d, err := parseDuration(m.Timeout)
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||||
return ServiceConfig{}, err
|
||||
}
|
||||
|
||||
mc := MethodConfig{
|
||||
WaitForReady: m.WaitForReady,
|
||||
Timeout: d,
|
||||
}
|
||||
if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
|
||||
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||||
return ServiceConfig{}, err
|
||||
}
|
||||
if m.MaxRequestMessageBytes != nil {
|
||||
if *m.MaxRequestMessageBytes > int64(maxInt) {
|
||||
mc.MaxReqSize = newInt(maxInt)
|
||||
} else {
|
||||
mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes))
|
||||
}
|
||||
}
|
||||
if m.MaxResponseMessageBytes != nil {
|
||||
if *m.MaxResponseMessageBytes > int64(maxInt) {
|
||||
mc.MaxRespSize = newInt(maxInt)
|
||||
} else {
|
||||
mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes))
|
||||
}
|
||||
}
|
||||
for _, n := range *m.Name {
|
||||
if path, valid := n.generatePath(); valid {
|
||||
sc.Methods[path] = mc
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if sc.retryThrottling != nil {
|
||||
if sc.retryThrottling.MaxTokens <= 0 ||
|
||||
sc.retryThrottling.MaxTokens >= 1000 ||
|
||||
sc.retryThrottling.TokenRatio <= 0 {
|
||||
// Illegal throttling config; disable throttling.
|
||||
sc.retryThrottling = nil
|
||||
}
|
||||
}
|
||||
return sc, nil
|
||||
}
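// Example (illustrative sketch, not part of this diff): a service config JSON
// literal of the shape parseServiceConfig accepts; every value below is made
// up. Parsing it yields a ServiceConfig whose Methods map contains the key
// "/helloworld.Greeter/SayHello".
const exampleServiceConfigJSON = `{
	"loadBalancingPolicy": "round_robin",
	"methodConfig": [{
		"name": [{"service": "helloworld.Greeter", "method": "SayHello"}],
		"waitForReady": true,
		"timeout": "1.5s",
		"maxRequestMessageBytes": 1048576,
		"retryPolicy": {
			"maxAttempts": 3,
			"initialBackoff": "0.1s",
			"maxBackoff": "1s",
			"backoffMultiplier": 2,
			"retryableStatusCodes": ["UNAVAILABLE"]
		}
	}]
}`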
|
||||
|
||||
func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) {
|
||||
if jrp == nil {
|
||||
return nil, nil
|
||||
}
|
||||
ib, err := parseDuration(&jrp.InitialBackoff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mb, err := parseDuration(&jrp.MaxBackoff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if jrp.MaxAttempts <= 1 ||
|
||||
*ib <= 0 ||
|
||||
*mb <= 0 ||
|
||||
jrp.BackoffMultiplier <= 0 ||
|
||||
len(jrp.RetryableStatusCodes) == 0 {
|
||||
grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
rp := &retryPolicy{
|
||||
maxAttempts: jrp.MaxAttempts,
|
||||
initialBackoff: *ib,
|
||||
maxBackoff: *mb,
|
||||
backoffMultiplier: jrp.BackoffMultiplier,
|
||||
retryableStatusCodes: make(map[codes.Code]bool),
|
||||
}
|
||||
if rp.maxAttempts > 5 {
|
||||
// TODO(retry): Make the max maxAttempts configurable.
|
||||
rp.maxAttempts = 5
|
||||
}
|
||||
for _, code := range jrp.RetryableStatusCodes {
|
||||
rp.retryableStatusCodes[code] = true
|
||||
}
|
||||
return rp, nil
|
||||
}
|
||||
|
||||
func min(a, b *int) *int {
|
||||
if *a < *b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
|
||||
if mcMax == nil && doptMax == nil {
|
||||
return &defaultVal
|
||||
}
|
||||
if mcMax != nil && doptMax != nil {
|
||||
return min(mcMax, doptMax)
|
||||
}
|
||||
if mcMax != nil {
|
||||
return mcMax
|
||||
}
|
||||
return doptMax
|
||||
}
|
||||
|
||||
func newInt(b int) *int {
|
||||
return &b
|
||||
}
|
|
@ -16,6 +16,8 @@
|
|||
*
|
||||
*/
|
||||
|
||||
//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto
|
||||
|
||||
// Package stats is for collecting and reporting various network and RPC stats.
|
||||
// This package is for monitoring purpose only. All fields are read-only.
|
||||
// All APIs are experimental.
|
||||
|
@ -24,6 +26,8 @@ package stats // import "google.golang.org/grpc/stats"
|
|||
import (
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// RPCStats contains stats information about RPCs.
|
||||
|
@ -131,8 +135,6 @@ func (s *OutPayload) isRPCStats() {}
|
|||
type OutHeader struct {
|
||||
// Client is true if this OutHeader is from client side.
|
||||
Client bool
|
||||
// WireLength is the wire length of header.
|
||||
WireLength int
|
||||
|
||||
// The following fields are valid only if Client is true.
|
||||
// FullMethod is the full RPC method string, i.e., /package.service/method.
|
||||
|
@ -167,6 +169,8 @@ func (s *OutTrailer) isRPCStats() {}
|
|||
type End struct {
|
||||
// Client is true if this End is from client side.
|
||||
Client bool
|
||||
// BeginTime is the time when the RPC began.
|
||||
BeginTime time.Time
|
||||
// EndTime is the time when the RPC ends.
|
||||
EndTime time.Time
|
||||
// Error is the error the RPC ended with. It is an error generated from
|
||||
|
@ -208,3 +212,85 @@ type ConnEnd struct {
|
|||
func (s *ConnEnd) IsClient() bool { return s.Client }
|
||||
|
||||
func (s *ConnEnd) isConnStats() {}
|
||||
|
||||
type incomingTagsKey struct{}
|
||||
type outgoingTagsKey struct{}
|
||||
|
||||
// SetTags attaches stats tagging data to the context, which will be sent in
|
||||
// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to
|
||||
// SetTags will overwrite the values from earlier calls.
|
||||
//
|
||||
// NOTE: this is provided only for backward compatibility with existing clients
|
||||
// and will likely be removed in an upcoming release. New uses should transmit
|
||||
// this type of data using metadata with a different, non-reserved (i.e. does
|
||||
// not begin with "grpc-") header name.
|
||||
func SetTags(ctx context.Context, b []byte) context.Context {
|
||||
return context.WithValue(ctx, outgoingTagsKey{}, b)
|
||||
}
|
||||
|
||||
// Tags returns the tags from the context for the inbound RPC.
|
||||
//
|
||||
// NOTE: this is provided only for backward compatibility with existing clients
|
||||
// and will likely be removed in an upcoming release. New uses should transmit
|
||||
// this type of data using metadata with a different, non-reserved (i.e. does
|
||||
// not begin with "grpc-") header name.
|
||||
func Tags(ctx context.Context) []byte {
|
||||
b, _ := ctx.Value(incomingTagsKey{}).([]byte)
|
||||
return b
|
||||
}
|
||||
|
||||
// SetIncomingTags attaches stats tagging data to the context, to be read by
|
||||
// the application (not sent in outgoing RPCs).
|
||||
//
|
||||
// This is intended for gRPC-internal use ONLY.
|
||||
func SetIncomingTags(ctx context.Context, b []byte) context.Context {
|
||||
return context.WithValue(ctx, incomingTagsKey{}, b)
|
||||
}
|
||||
|
||||
// OutgoingTags returns the tags from the context for the outbound RPC.
|
||||
//
|
||||
// This is intended for gRPC-internal use ONLY.
|
||||
func OutgoingTags(ctx context.Context) []byte {
|
||||
b, _ := ctx.Value(outgoingTagsKey{}).([]byte)
|
||||
return b
|
||||
}
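// Example (illustrative sketch, not part of this diff): attaching tag bytes to
// an outgoing context and reading them back; the byte payload is arbitrary.
func exampleTags(ctx context.Context) []byte {
	ctx = SetTags(ctx, []byte("request-tag"))
	// For an outgoing RPC these bytes travel as the grpc-tags-bin header.
	return OutgoingTags(ctx)
}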
|
||||
|
||||
type incomingTraceKey struct{}
|
||||
type outgoingTraceKey struct{}
|
||||
|
||||
// SetTrace attaches stats tagging data to the context, which will be sent in
|
||||
// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to
|
||||
// SetTrace will overwrite the values from earlier calls.
|
||||
//
|
||||
// NOTE: this is provided only for backward compatibility with existing clients
|
||||
// and will likely be removed in an upcoming release. New uses should transmit
|
||||
// this type of data using metadata with a different, non-reserved (i.e. does
|
||||
// not begin with "grpc-") header name.
|
||||
func SetTrace(ctx context.Context, b []byte) context.Context {
|
||||
return context.WithValue(ctx, outgoingTraceKey{}, b)
|
||||
}
|
||||
|
||||
// Trace returns the trace from the context for the inbound RPC.
|
||||
//
|
||||
// NOTE: this is provided only for backward compatibility with existing clients
|
||||
// and will likely be removed in an upcoming release. New uses should transmit
|
||||
// this type of data using metadata with a different, non-reserved (i.e. does
|
||||
// not begin with "grpc-") header name.
|
||||
func Trace(ctx context.Context) []byte {
|
||||
b, _ := ctx.Value(incomingTraceKey{}).([]byte)
|
||||
return b
|
||||
}
|
||||
|
||||
// SetIncomingTrace attaches stats tagging data to the context, to be read by
|
||||
// the application (not sent in outgoing RPCs). It is intended for
|
||||
// gRPC-internal use.
|
||||
func SetIncomingTrace(ctx context.Context, b []byte) context.Context {
|
||||
return context.WithValue(ctx, incomingTraceKey{}, b)
|
||||
}
|
||||
|
||||
// OutgoingTrace returns the trace from the context for the outbound RPC. It is
|
||||
// intended for gRPC-internal use.
|
||||
func OutgoingTrace(ctx context.Context) []byte {
|
||||
b, _ := ctx.Value(outgoingTraceKey{}).([]byte)
|
||||
return b
|
||||
}
|
||||
|
|
|
@ -0,0 +1,42 @@
|
|||
// +build go1.6,!go1.7
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package status
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
// FromContextError converts a context error into a Status. It returns a
|
||||
// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is
|
||||
// non-nil and not a context error.
|
||||
func FromContextError(err error) *Status {
|
||||
switch err {
|
||||
case nil:
|
||||
return New(codes.OK, "")
|
||||
case context.DeadlineExceeded:
|
||||
return New(codes.DeadlineExceeded, err.Error())
|
||||
case context.Canceled:
|
||||
return New(codes.Canceled, err.Error())
|
||||
default:
|
||||
return New(codes.Unknown, err.Error())
|
||||
}
|
||||
}
|
|
@ -0,0 +1,44 @@
|
|||
// +build go1.7
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package status
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
netctx "golang.org/x/net/context"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
// FromContextError converts a context error into a Status. It returns a
|
||||
// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is
|
||||
// non-nil and not a context error.
|
||||
func FromContextError(err error) *Status {
|
||||
switch err {
|
||||
case nil:
|
||||
return New(codes.OK, "")
|
||||
case context.DeadlineExceeded, netctx.DeadlineExceeded:
|
||||
return New(codes.DeadlineExceeded, err.Error())
|
||||
case context.Canceled, netctx.Canceled:
|
||||
return New(codes.Canceled, err.Error())
|
||||
default:
|
||||
return New(codes.Unknown, err.Error())
|
||||
}
|
||||
}
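// Example (illustrative sketch, not part of this diff): mapping a context
// error to a gRPC code once the context is done.
func exampleFromContextError(ctx context.Context) codes.Code {
	<-ctx.Done()
	// Deadline expiry maps to codes.DeadlineExceeded, cancellation to
	// codes.Canceled; nil would map to codes.OK, anything else to codes.Unknown.
	return FromContextError(ctx.Err()).Code()
}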
|
|
@ -46,7 +46,7 @@ func (se *statusError) Error() string {
|
|||
return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
|
||||
}
|
||||
|
||||
func (se *statusError) GRPCStatus() *Status {
|
||||
return &Status{s: (*spb.Status)(se)}
|
||||
}
|
||||
|
||||
|
@ -120,15 +120,23 @@ func FromProto(s *spb.Status) *Status {
|
|||
}
|
||||
|
||||
// FromError returns a Status representing err if it was produced from this
// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a
// Status is returned with codes.Unknown and the original error message.
func FromError(err error) (s *Status, ok bool) {
	if err == nil {
		return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
	}
	if se, ok := err.(interface{ GRPCStatus() *Status }); ok {
		return se.GRPCStatus(), true
	}
	return New(codes.Unknown, err.Error()), false
|
||||
}
|
||||
|
||||
// Convert is a convenience function which removes the need to handle the
|
||||
// boolean return value from FromError.
|
||||
func Convert(err error) *Status {
|
||||
s, _ := FromError(err)
|
||||
return s
|
||||
}
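// Example (illustrative sketch, not part of this diff): inspecting an RPC
// error on the caller side; err would typically come from a stub method call.
func exampleInspect(err error) (codes.Code, string) {
	st := Convert(err) // never nil; non-status errors become codes.Unknown
	return st.Code(), st.Message()
}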
|
||||
|
||||
// WithDetails returns a new status with the provided details messages appended to the status.
|
||||
|
@ -166,3 +174,16 @@ func (s *Status) Details() []interface{} {
|
|||
}
|
||||
return details
|
||||
}
|
||||
|
||||
// Code returns the Code of the error if it is a Status error, codes.OK if err
|
||||
// is nil, or codes.Unknown otherwise.
|
||||
func Code(err error) codes.Code {
|
||||
// Don't use FromError to avoid allocation of OK status.
|
||||
if err == nil {
|
||||
return codes.OK
|
||||
}
|
||||
if se, ok := err.(interface{ GRPCStatus() *Status }); ok {
|
||||
return se.GRPCStatus().Code()
|
||||
}
|
||||
return codes.Unknown
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
|
@ -32,8 +32,20 @@ type Info struct {
|
|||
// TODO: More to be added.
|
||||
}
|
||||
|
||||
// ServerInHandle defines the function which runs before a new stream is created
|
||||
// on the server side. If it returns a non-nil error, the stream will not be
|
||||
// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM.
|
||||
// The client will receive an RPC error "code = Unavailable, desc = stream
|
||||
// terminated by RST_STREAM with error code: REFUSED_STREAM".
|
||||
//
|
||||
// It's intended to be used in situations where you don't want to waste the
|
||||
// resources to accept the new stream (e.g. rate-limiting). And the content of
|
||||
// the error will be ignored and won't be sent back to the client. For other
|
||||
// general usages, please use interceptors.
|
||||
//
|
||||
// Note that it is executed in the per-connection I/O goroutine(s) instead of
|
||||
// per-RPC goroutine. Therefore, users should NOT have any
|
||||
// blocking/time-consuming work in this handle. Otherwise all the RPCs would
|
||||
// slow down. Also, for the same reason, this handle won't be called
|
||||
// concurrently by gRPC.
|
||||
type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error)
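// Example (illustrative sketch, not part of this diff): a non-blocking handle
// that refuses new streams above an in-flight limit. It assumes "errors" and
// "sync/atomic" are imported and that inFlight is maintained elsewhere; a
// server would install it with grpc.InTapHandle(rateLimitHandle).
//
//	var inFlight int64
//
//	func rateLimitHandle(ctx context.Context, info *Info) (context.Context, error) {
//		if atomic.LoadInt64(&inFlight) > 1000 {
//			// The error text is not sent to the client; the stream is simply refused.
//			return nil, errors.New("too many in-flight streams")
//		}
//		return ctx, nil
//	}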
|
||||
|
|
|
@ -31,7 +31,7 @@ import (
|
|||
|
||||
// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
|
||||
// This should only be set before any RPCs are sent or received by this program.
|
||||
var EnableTracing bool
|
||||
|
||||
// methodFamily returns the trace family for the given method.
|
||||
// It turns "/pkg.Service/GetFoo" into "pkg.Service".
|
||||
|
@ -76,6 +76,15 @@ func (f *firstLine) String() string {
|
|||
return line.String()
|
||||
}
|
||||
|
||||
const truncateSize = 100
|
||||
|
||||
func truncate(x string, l int) string {
|
||||
if l > len(x) {
|
||||
return x
|
||||
}
|
||||
return x[:l]
|
||||
}
|
||||
|
||||
// payload represents an RPC request or response payload.
|
||||
type payload struct {
|
||||
sent bool // whether this is an outgoing payload
|
||||
|
@ -85,9 +94,9 @@ type payload struct {
|
|||
|
||||
func (p payload) String() string {
|
||||
if p.sent {
|
||||
return fmt.Sprintf("sent: %v", p.msg)
|
||||
return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize)
|
||||
}
|
||||
return fmt.Sprintf("recv: %v", p.msg)
|
||||
return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize)
|
||||
}
|
||||
|
||||
type fmtStringer struct {
|
||||
|
|
File diff suppressed because it is too large
|
@ -0,0 +1,22 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
// Version is the current grpc version.
|
||||
const Version = "1.14.0-dev"
|
|
@@ -0,0 +1,89 @@
#!/bin/bash

if [[ `uname -a` = *"Darwin"* ]]; then
  echo "It seems you are running on Mac. This script does not work on Mac. See https://github.com/grpc/grpc-go/issues/2047"
  exit 1
fi

set -ex # Exit on error; debugging enabled.
set -o pipefail # Fail a pipe if any sub-command fails.

die() {
  echo "$@" >&2
  exit 1
}

PATH="$GOPATH/bin:$GOROOT/bin:$PATH"

if [ "$1" = "-install" ]; then
  go get -d \
    google.golang.org/grpc/...
  go get -u \
    github.com/golang/lint/golint \
    golang.org/x/tools/cmd/goimports \
    honnef.co/go/tools/cmd/staticcheck \
    github.com/client9/misspell/cmd/misspell \
    github.com/golang/protobuf/protoc-gen-go
  if [[ -z "$VET_SKIP_PROTO" ]]; then
    if [[ "$TRAVIS" = "true" ]]; then
      PROTOBUF_VERSION=3.3.0
      PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
      pushd /home/travis
      wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
      unzip ${PROTOC_FILENAME}
      bin/protoc --version
      popd
    elif ! which protoc > /dev/null; then
      die "Please install protoc into your path"
    fi
  fi
  exit 0
elif [[ "$#" -ne 0 ]]; then
  die "Unknown argument(s): $*"
fi

# TODO: Remove this check and the mangling below once "context" is imported
# directly.
if git status --porcelain | read; then
  die "Uncommitted or untracked files found; commit changes first"
fi

git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | tee /dev/stderr | (! read)
git ls-files "*.go" | xargs grep -l '"unsafe"' 2>&1 | (! grep -v '_test.go') | tee /dev/stderr | (! read)
git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand') | tee /dev/stderr | (! read)
gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read)
goimports -l . 2>&1 | tee /dev/stderr | (! read)
golint ./... 2>&1 | (grep -vE "(_mock|\.pb)\.go:" || true) | tee /dev/stderr | (! read)

# Undo any edits made by this script.
cleanup() {
  git reset --hard HEAD
}
trap cleanup EXIT

# Rewrite golang.org/x/net/context -> context imports (see grpc/grpc-go#1484).
# TODO: Remove this mangling once "context" is imported directly (grpc/grpc-go#711).
git ls-files "*.go" | xargs sed -i 's:"golang.org/x/net/context":"context":'
set +o pipefail
# TODO: Stop filtering pb.go files once golang/protobuf#214 is fixed.
go tool vet -all . 2>&1 | grep -vE '(clientconn|transport\/transport_test).go:.*cancel (function|var)' | grep -vF '.pb.go:' | tee /dev/stderr | (! read)
set -o pipefail
git reset --hard HEAD

if [[ -z "$VET_SKIP_PROTO" ]]; then
  PATH="/home/travis/bin:$PATH" make proto && \
    git status --porcelain 2>&1 | (! read) || \
    (git status; git --no-pager diff; exit 1)
fi

# TODO(menghanl): fix errors in transport_test.
staticcheck -ignore '
google.golang.org/grpc/internal/transport/transport_test.go:SA2002
google.golang.org/grpc/benchmark/benchmain/main.go:SA1019
google.golang.org/grpc/stats/stats_test.go:SA1019
google.golang.org/grpc/test/end2end_test.go:SA1019
google.golang.org/grpc/balancer_test.go:SA1019
google.golang.org/grpc/balancer.go:SA1019
google.golang.org/grpc/clientconn_test.go:SA1019
' ./...
misspell -error .
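For reference, the import convention that the sed rewrite in vet.sh enforces: code should import the standard library "context" package rather than "golang.org/x/net/context". A minimal sketch, assuming the vendored grpc_health_v1 package; the healthServer type is illustrative only:

package server

import (
	"context" // preferred over "golang.org/x/net/context"; vet.sh rewrites the old path before running go vet

	pb "google.golang.org/grpc/health/grpc_health_v1"
)

// healthServer is an illustrative health-check service implementation.
type healthServer struct{}

// Check always reports SERVING; its signature uses the standard library context package.
func (s *healthServer) Check(ctx context.Context, req *pb.HealthCheckRequest) (*pb.HealthCheckResponse, error) {
	return &pb.HealthCheckResponse{Status: pb.HealthCheckResponse_SERVING}, nil
}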
@@ -2973,28 +2973,58 @@
			"revisionTime": "2017-07-31T18:20:57Z"
		},
		{
			"checksumSHA1": "nwfmMh930HtXA7u5HYomxSR3Ixg=",
			"checksumSHA1": "Sn2fr19yZE7k6G3ke/etP49E6Sc=",
			"path": "google.golang.org/grpc",
			"revision": "7657092a1303cc5a6fa3fee988d57c665683a4da",
			"revisionTime": "2017-08-09T21:16:03Z"
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "/eTpFgjvMq5Bc9hYnw5fzKG4B6I=",
			"checksumSHA1": "B+kZFVP8zRiQMpoEb39Mp2oSmqg=",
			"path": "google.golang.org/grpc/balancer",
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "lw+L836hLeH8+//le+C+ycddCCU=",
			"path": "google.golang.org/grpc/balancer/base",
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "DJ1AtOk4Pu7bqtUMob95Hw8HPNw=",
			"path": "google.golang.org/grpc/balancer/roundrobin",
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "R3tuACGAPyK4lr+oSNt1saUzC0M=",
			"path": "google.golang.org/grpc/codes",
			"revision": "7657092a1303cc5a6fa3fee988d57c665683a4da",
			"revisionTime": "2017-08-09T21:16:03Z"
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "XH2WYcDNwVO47zYShREJjcYXm0Y=",
			"path": "google.golang.org/grpc/connectivity",
			"revision": "7657092a1303cc5a6fa3fee988d57c665683a4da",
			"revisionTime": "2017-08-09T21:16:03Z"
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "5ylThBvJnIcyWhL17AC9+Sdbw2E=",
			"checksumSHA1": "wA6y5rkH1v4bWBe5M1r/Hdtgma4=",
			"path": "google.golang.org/grpc/credentials",
			"revision": "7657092a1303cc5a6fa3fee988d57c665683a4da",
			"revisionTime": "2017-08-09T21:16:03Z"
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "cfLb+pzWB+Glwp82rgfcEST1mv8=",
			"path": "google.golang.org/grpc/encoding",
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "LKKkn7EYA+Do9Qwb2/SUKLFNxoo=",
			"path": "google.golang.org/grpc/encoding/proto",
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "2NbY9kmMweE4VUsruRsvmViVnNg=",
@@ -3003,70 +3033,118 @@
			"revisionTime": "2017-08-09T21:16:03Z"
		},
		{
			"checksumSHA1": "ntHev01vgZgeIh5VFRmbLx/BSTo=",
			"checksumSHA1": "ZPPSFisPDz2ANO4FBZIft+fRxyk=",
			"path": "google.golang.org/grpc/grpclog",
			"revision": "7657092a1303cc5a6fa3fee988d57c665683a4da",
			"revisionTime": "2017-08-09T21:16:03Z"
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "pc9cweMiKQ5hVMuO9UoMGdbizaY=",
			"checksumSHA1": "MRdkTRX18dr+tG+Q2s+BHox0T64=",
			"path": "google.golang.org/grpc/health",
			"revision": "7657092a1303cc5a6fa3fee988d57c665683a4da",
			"revisionTime": "2017-08-09T21:16:03Z"
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "W5KfI1NIGJt7JaVnLzefDZr3+4s=",
			"checksumSHA1": "aOU41miZwmHdMbzZNB4H2xz5wWI=",
			"path": "google.golang.org/grpc/health/grpc_health_v1",
			"revision": "7657092a1303cc5a6fa3fee988d57c665683a4da",
			"revisionTime": "2017-08-09T21:16:03Z"
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "U9vDe05/tQrvFBojOQX8Xk12W9I=",
			"checksumSHA1": "cSdzm5GhbalJbWUNrN8pRdW0uks=",
			"path": "google.golang.org/grpc/internal",
			"revision": "7657092a1303cc5a6fa3fee988d57c665683a4da",
			"revisionTime": "2017-08-09T21:16:03Z"
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "uDJA7QK2iGnEwbd9TPqkLaM+xuU=",
			"path": "google.golang.org/grpc/internal/backoff",
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "7FWQDRqF5ZhOBglkZgSxgsKzr/8=",
			"path": "google.golang.org/grpc/internal/channelz",
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "5dFUCEaPjKwza9kwKqgljp8ckU4=",
			"path": "google.golang.org/grpc/internal/envconfig",
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "70gndc/uHwyAl3D45zqp7vyHWlo=",
			"path": "google.golang.org/grpc/internal/grpcrand",
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "F/QkfMhNApNewEK84d9CM3fT6js=",
			"path": "google.golang.org/grpc/internal/transport",
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "hcuHgKp8W0wIzoCnNfKI8NUss5o=",
			"path": "google.golang.org/grpc/keepalive",
			"revision": "7657092a1303cc5a6fa3fee988d57c665683a4da",
			"revisionTime": "2017-08-09T21:16:03Z"
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "N++Ur11m6Dq3j14/Hc2Kqmxroag=",
			"checksumSHA1": "OjIAi5AzqlQ7kLtdAyjvdgMf6hc=",
			"path": "google.golang.org/grpc/metadata",
			"revision": "7657092a1303cc5a6fa3fee988d57c665683a4da",
			"revisionTime": "2017-08-09T21:16:03Z"
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "bYKw8OIjj/ybY68eGqy7zqq6qmE=",
			"checksumSHA1": "VvGBoawND0urmYDy11FT+U1IHtU=",
			"path": "google.golang.org/grpc/naming",
			"revision": "7657092a1303cc5a6fa3fee988d57c665683a4da",
			"revisionTime": "2017-08-09T21:16:03Z"
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "n5EgDdBqFMa2KQFhtl+FF/4gIFo=",
			"path": "google.golang.org/grpc/peer",
			"revision": "7657092a1303cc5a6fa3fee988d57c665683a4da",
			"revisionTime": "2017-08-09T21:16:03Z"
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "53Mbn2VqooOk47EWLHHFpKEOVwE=",
			"checksumSHA1": "GEq6wwE1qWLmkaM02SjxBmmnHDo=",
			"path": "google.golang.org/grpc/resolver",
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "3aaLCpBkkCNQvVPHyvPi+G0vnQI=",
			"path": "google.golang.org/grpc/resolver/dns",
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "zs9M4xE8Lyg4wvuYvR00XoBxmuw=",
			"path": "google.golang.org/grpc/resolver/passthrough",
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "YclPgme2gT3S0hTkHVdE1zAxJdo=",
			"path": "google.golang.org/grpc/stats",
			"revision": "7657092a1303cc5a6fa3fee988d57c665683a4da",
			"revisionTime": "2017-08-09T21:16:03Z"
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "3Dwz4RLstDHMPyDA7BUsYe+JP4w=",
			"checksumSHA1": "t/NhHuykWsxY0gEBd2WIv5RVBK8=",
			"path": "google.golang.org/grpc/status",
			"revision": "7657092a1303cc5a6fa3fee988d57c665683a4da",
			"revisionTime": "2017-08-09T21:16:03Z"
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "aixGx/Kd0cj9ZlZHacpHe3XgMQ4=",
			"checksumSHA1": "qvArRhlrww5WvRmbyMF2mUfbJew=",
			"path": "google.golang.org/grpc/tap",
			"revision": "7657092a1303cc5a6fa3fee988d57c665683a4da",
			"revisionTime": "2017-08-09T21:16:03Z"
			"revision": "339b6cb107199486c3b352f70db9e977191be8b7",
			"revisionTime": "2018-07-26T17:30:58Z"
		},
		{
			"checksumSHA1": "S0qdJtlMimKlOrJ4aZ/pxO5uVwg=",
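The vendor entries above pull in the balancer/roundrobin and resolver/dns packages that did not exist at the previously pinned revision. A minimal sketch of what they enable; the target address is a placeholder and the insecure credentials are for illustration only:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/balancer/roundrobin" // newly vendored; registers the "round_robin" balancer via init()
	_ "google.golang.org/grpc/resolver/dns"        // newly vendored; registers the "dns" resolver scheme via init()
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// "backend.example.com:50051" is a placeholder target; the dns:/// scheme goes
	// through the vendored DNS resolver, and WithBalancerName spreads RPCs across
	// the resolved addresses round-robin.
	conn, err := grpc.DialContext(ctx, "dns:///backend.example.com:50051",
		grpc.WithInsecure(),
		grpc.WithBalancerName("round_robin"),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}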