vendor: google.golang.org/grpc@v1.18.0

go get google.golang.org/grpc@v1.18.0
go mod tidy
go mod vendor
Radek Simko 2019-02-20 18:51:33 +00:00
parent 626022670e
commit 5ede0bf925
GPG Key ID: 1F1C84FE689A88D7 (no known key found for this signature in database)
112 changed files with 55453 additions and 2471 deletions

go.mod (5 changed lines)

@ -1,7 +1,7 @@
module github.com/hashicorp/terraform
require (
cloud.google.com/go v0.15.0
cloud.google.com/go v0.26.0
github.com/Azure/azure-sdk-for-go v21.3.0+incompatible
github.com/Azure/go-autorest v10.15.4+incompatible
github.com/Azure/go-ntlmssp v0.0.0-20170803034930-c92175d54006 // indirect
@ -43,6 +43,7 @@ require (
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c // indirect
github.com/google/go-cmp v0.2.0
github.com/google/martian v2.1.0+incompatible // indirect
github.com/googleapis/gax-go v0.0.0-20161107002406-da06d194a00e // indirect
github.com/gophercloud/gophercloud v0.0.0-20190208042652-bc37892e1968
github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01 // indirect
@ -134,7 +135,7 @@ require (
golang.org/x/oauth2 v0.0.0-20181003184128-c57b0facaced
google.golang.org/api v0.0.0-20181015145326-625cd1887957
google.golang.org/appengine v1.3.0 // indirect
google.golang.org/grpc v1.14.0
google.golang.org/grpc v1.18.0
gopkg.in/vmihailenco/msgpack.v2 v2.9.1 // indirect
labix.org/v2/mgo v0.0.0-20140701140051-000000000287 // indirect
launchpad.net/gocheck v0.0.0-20140225173054-000000000087 // indirect

go.sum (18 changed lines)

@ -1,5 +1,5 @@
cloud.google.com/go v0.15.0 h1:/e2wXYguItvFu4fJCvhMRPIwwrimuUxI+aCVx/ahLjg=
cloud.google.com/go v0.15.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/Azure/azure-sdk-for-go v21.3.0+incompatible h1:YFvAka2WKAl2xnJkYV1e1b7E2z88AgFszDzWU18ejMY=
github.com/Azure/azure-sdk-for-go v21.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
@ -52,6 +52,7 @@ github.com/chzyer/readline v0.0.0-20161106042343-c914be64f07d h1:aG5FcWiZTOhPQzY
github.com/chzyer/readline v0.0.0-20161106042343-c914be64f07d/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.0 h1:HIgH5xUWXT914HCI671AxuTTqjj64UOFr7pHn48LUTI=
github.com/coreos/bbolt v1.3.0/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04=
@ -86,6 +87,7 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekf
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 h1:u4bArs140e9+AfE52mFHOXVFnOSBJBRlzTHrOPLOIhE=
github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@ -99,6 +101,8 @@ github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/googleapis/gax-go v0.0.0-20161107002406-da06d194a00e h1:CYRpN206UTHUinz3VJoLaBdy1gEGeJNsqT0mvswDcMw=
github.com/googleapis/gax-go v0.0.0-20161107002406-da06d194a00e/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/gophercloud/gophercloud v0.0.0-20190208042652-bc37892e1968 h1:Pu+HW4kcQozw0QyrTTgLE+3RXNqFhQNNzhbnoLFL83c=
@ -140,8 +144,6 @@ github.com/hashicorp/go-msgpack v0.0.0-20150518234257-fa3f63826f7c/go.mod h1:ahL
github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-plugin v0.0.0-20190212232519-b838ffee39ce h1:I3KJUf8jyMubLFeHit2ibr9YeVxJX2CXMXVM6xlB+Qk=
github.com/hashicorp/go-plugin v0.0.0-20190212232519-b838ffee39ce/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
github.com/hashicorp/go-plugin v0.0.0-20190220160451-3f118e8ee104 h1:9iQ/zrTOJqzP+kH37s6xNb6T1RysiT7fnDD3DJbspVw=
github.com/hashicorp/go-plugin v0.0.0-20190220160451-3f118e8ee104/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
github.com/hashicorp/go-retryablehttp v0.5.1 h1:Vsx5XKPqPs3M6sM4U4GWyUqFS8aBiL9U5gkgvpkg4SE=
@ -195,6 +197,7 @@ github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 h1:PJPDf8OUfOK1bb/
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba h1:NARVGAAgEXvoMeNPHhPFt1SBt1VMznA3Gnz9d0qj+co=
github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs=
github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
@ -334,6 +337,7 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85 h1:et7+NAX3lLIk5qUCTA9QelBjGE/NkhzYw/mhnr0s7nI=
golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -342,12 +346,14 @@ golang.org/x/net v0.0.0-20181129055619-fae4c4e3ad76 h1:xx5MUFyRQRbPk6VjWjIE1epE/
golang.org/x/net v0.0.0-20181129055619-fae4c4e3ad76/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd h1:HuTn7WObtcDo9uEEU7rEqL0jYthdXAmZ6PP+meazmaU=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181003184128-c57b0facaced h1:4oqSq7eft7MdPKBGQK11X9WYUxmj6ZLgGTqYIbY1kyw=
golang.org/x/oauth2 v0.0.0-20181003184128-c57b0facaced/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc h1:WiYx1rIFmx8c0mXAFtv5D/mHyKe1+jmuP7PViuwqwuQ=
@ -356,6 +362,7 @@ golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20181015145326-625cd1887957 h1:jwCmWUTrTFfjsobRuGurnCQeW4NZKijaIf6yAXwLR0E=
google.golang.org/api v0.0.0-20181015145326-625cd1887957/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
@ -367,6 +374,8 @@ google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXt
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.18.0 h1:IZl7mfBGfbhYx2p2rKRtYgDFw6SBz+kclmxYrCksPPA=
google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -377,6 +386,7 @@ gopkg.in/vmihailenco/msgpack.v2 v2.9.1/go.mod h1:/3Dn1Npt9+MYyLpYYXjInO/5jvMLamn
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
labix.org/v2/mgo v0.0.0-20140701140051-000000000287 h1:L0cnkNl4TfAXzvdrqsYEmxOHOCv2p5I3taaReO8BWFs=
labix.org/v2/mgo v0.0.0-20140701140051-000000000287/go.mod h1:Lg7AYkt1uXJoR9oeSZ3W/8IXLdvOfIITgZnommstyz4=

vendor/cloud.google.com/go/CONTRIBUTORS

@ -22,10 +22,13 @@ David Symonds <dsymonds@golang.org>
Filippo Valsorda <hi@filippo.io>
Glenn Lewis <gmlewis@google.com>
Ingo Oeser <nightlyone@googlemail.com>
James Hall <james.hall@shopify.com>
Johan Euphrosine <proppy@google.com>
Jonathan Amsterdam <jba@google.com>
Kunpei Sakai <namusyaka@gmail.com>
Luna Duclos <luna.duclos@palmstonegames.com>
Magnus Hiie <magnus.hiie@gmail.com>
Mario Castro <mariocaster@gmail.com>
Michael McGreevy <mcgreevy@golang.org>
Omar Jarjur <ojarjur@google.com>
Paweł Knap <pawelknap88@gmail.com>

vendor/cloud.google.com/go/LICENSE (generated, vendored; 2 changed lines)

@ -187,7 +187,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2014 Google Inc.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

vendor/cloud.google.com/go/compute/metadata/metadata.go

@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved.
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -64,7 +64,7 @@ var (
)
var (
metaClient = &http.Client{
defaultClient = &Client{hc: &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
@ -72,15 +72,15 @@ var (
}).Dial,
ResponseHeaderTimeout: 2 * time.Second,
},
}
subscribeClient = &http.Client{
}}
subscribeClient = &Client{hc: &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
},
}
}}
)
// NotDefinedError is returned when requested metadata is not defined.
@ -95,74 +95,16 @@ func (suffix NotDefinedError) Error() string {
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func Get(suffix string) (string, error) {
val, _, err := getETag(metaClient, suffix)
return val, err
}
// getETag returns a value from the metadata service as well as the associated
// ETag using the provided client. This func is otherwise equivalent to Get.
func getETag(client *http.Client, suffix string) (value, etag string, err error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
// variable GCE_METADATA_HOST is first inspected to decide where metadata
// requests shall go.
host := os.Getenv(metadataHostEnv)
if host == "" {
// Using 169.254.169.254 instead of "metadata" here because Go
// binaries built with the "netgo" tag and without cgo won't
// know the search suffix for "metadata" is
// ".google.internal", and this IP address is documented as
// being stable anyway.
host = metadataIP
}
url := "http://" + host + "/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", url, nil)
req.Header.Set("Metadata-Flavor", "Google")
req.Header.Set("User-Agent", userAgent)
res, err := client.Do(req)
if err != nil {
return "", "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
if res.StatusCode != 200 {
return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
return string(all), res.Header.Get("Etag"), nil
}
func getTrimmed(suffix string) (s string, err error) {
s, err = Get(suffix)
s = strings.TrimSpace(s)
return
}
func (c *cachedValue) get() (v string, err error) {
func (c *cachedValue) get(cl *Client) (v string, err error) {
defer c.mu.Unlock()
c.mu.Lock()
if c.v != "" {
return c.v, nil
}
if c.trim {
v, err = getTrimmed(c.k)
v, err = cl.getTrimmed(c.k)
} else {
v, err = Get(c.k)
v, err = cl.Get(c.k)
}
if err == nil {
c.v = v
@ -201,7 +143,7 @@ func testOnGCE() bool {
go func() {
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
req.Header.Set("User-Agent", userAgent)
res, err := ctxhttp.Do(ctx, metaClient, req)
res, err := ctxhttp.Do(ctx, defaultClient.hc, req)
if err != nil {
resc <- false
return
@ -266,6 +208,255 @@ func systemInfoSuggestsGCE() bool {
return name == "Google" || name == "Google Compute Engine"
}
// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
// ResponseHeaderTimeout).
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
return subscribeClient.Subscribe(suffix, fn)
}
// Get calls Client.Get on the default client.
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return defaultClient.ProjectID() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) { return defaultClient.InternalIP() }
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) { return defaultClient.Hostname() }
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) { return defaultClient.InstanceID() }
// InstanceName returns the current VM's instance ID string.
func InstanceName() (string, error) { return defaultClient.InstanceName() }
// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) { return defaultClient.Zone() }
// InstanceAttributes calls Client.InstanceAttributes on the default client.
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
// ProjectAttributes calls Client.ProjectAttributes on the default client.
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
func InstanceAttributeValue(attr string) (string, error) {
return defaultClient.InstanceAttributeValue(attr)
}
// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
func ProjectAttributeValue(attr string) (string, error) {
return defaultClient.ProjectAttributeValue(attr)
}
// Scopes calls Client.Scopes on the default client.
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
func strsContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
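
For reviewers unfamiliar with this vendored package, a brief usage sketch (not part of the diff) of the package-level helpers added above. It assumes code running on a GCE VM (or against a spoofed GCE_METADATA_HOST) and uses metadata.OnGCE, which is defined elsewhere in this file:

package main

import (
    "fmt"
    "log"

    "cloud.google.com/go/compute/metadata"
)

func main() {
    // The helpers below only succeed on (or when spoofing) the GCE metadata
    // service, so guard with OnGCE first.
    if !metadata.OnGCE() {
        log.Fatal("not running on GCE")
    }
    proj, err := metadata.ProjectID()
    if err != nil {
        log.Fatal(err)
    }
    zone, err := metadata.Zone()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(proj, zone)
}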
// A Client provides metadata.
type Client struct {
hc *http.Client
}
// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
// will use the given http.Client instead of the default client.
func NewClient(c *http.Client) *Client {
return &Client{hc: c}
}
// getETag returns a value from the metadata service as well as the associated ETag.
// This func is otherwise equivalent to Get.
func (c *Client) getETag(suffix string) (value, etag string, err error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
// variable GCE_METADATA_HOST is first inspected to decide where metadata
// requests shall go.
host := os.Getenv(metadataHostEnv)
if host == "" {
// Using 169.254.169.254 instead of "metadata" here because Go
// binaries built with the "netgo" tag and without cgo won't
// know the search suffix for "metadata" is
// ".google.internal", and this IP address is documented as
// being stable anyway.
host = metadataIP
}
url := "http://" + host + "/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", url, nil)
req.Header.Set("Metadata-Flavor", "Google")
req.Header.Set("User-Agent", userAgent)
res, err := c.hc.Do(req)
if err != nil {
return "", "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
if res.StatusCode != 200 {
return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
return string(all), res.Header.Get("Etag"), nil
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func (c *Client) Get(suffix string) (string, error) {
val, _, err := c.getETag(suffix)
return val, err
}
func (c *Client) getTrimmed(suffix string) (s string, err error) {
s, err = c.Get(suffix)
s = strings.TrimSpace(s)
return
}
func (c *Client) lines(suffix string) ([]string, error) {
j, err := c.Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// ProjectID returns the current instance's project ID string.
func (c *Client) ProjectID() (string, error) { return projID.get(c) }
// NumericProjectID returns the current instance's numeric project ID.
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
// InstanceID returns the current VM's numeric instance ID.
func (c *Client) InstanceID() (string, error) { return instID.get(c) }
// InternalIP returns the instance's primary internal IP address.
func (c *Client) InternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/ip")
}
// ExternalIP returns the instance's primary external (public) IP address.
func (c *Client) ExternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func (c *Client) Hostname() (string, error) {
return c.getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func (c *Client) InstanceTags() ([]string, error) {
var s []string
j, err := c.Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceName returns the current VM's instance ID string.
func (c *Client) InstanceName() (string, error) {
host, err := c.Hostname()
if err != nil {
return "", err
}
return strings.Split(host, ".")[0], nil
}
// Zone returns the current VM's zone, such as "us-central1-b".
func (c *Client) Zone() (string, error) {
zone, err := c.getTrimmed("instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
return zone[strings.LastIndex(zone, "/")+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
return c.Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
return c.Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
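
The new Client type lets callers supply their own http.Client instead of relying on defaultClient. A hedged sketch of that, where the 5-second timeout and the "my-attr" attribute name are made-up illustration values:

package main

import (
    "fmt"
    "log"
    "net/http"
    "time"

    "cloud.google.com/go/compute/metadata"
)

func main() {
    // A caller-controlled transport; the timeout is an arbitrary example value.
    c := metadata.NewClient(&http.Client{Timeout: 5 * time.Second})

    host, err := c.Hostname()
    if err != nil {
        log.Fatal(err)
    }
    // "my-attr" is a hypothetical instance attribute used only for illustration.
    val, err := c.InstanceAttributeValue("my-attr")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(host, val)
}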
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
@ -275,11 +466,11 @@ func systemInfoSuggestsGCE() bool {
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
const failedSubscribeSleep = time.Second * 5
// First check to see if the metadata value exists at all.
val, lastETag, err := getETag(subscribeClient, suffix)
val, lastETag, err := c.getETag(suffix)
if err != nil {
return err
}
@ -295,7 +486,7 @@ func Subscribe(suffix string, fn func(v string, ok bool) error) error {
suffix += "?wait_for_change=true&last_etag="
}
for {
val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
if err != nil {
if _, deleted := err.(NotDefinedError); !deleted {
time.Sleep(failedSubscribeSleep)
@ -310,128 +501,3 @@ func Subscribe(suffix string, fn func(v string, ok bool) error) error {
}
}
}
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return projID.get() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return projNum.get() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/ip")
}
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) {
return getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) {
var s []string
j, err := Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) {
return instID.get()
}
// InstanceName returns the current VM's instance ID string.
func InstanceName() (string, error) {
host, err := Hostname()
if err != nil {
return "", err
}
return strings.Split(host, ".")[0], nil
}
// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) {
zone, err := getTrimmed("instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
return zone[strings.LastIndex(zone, "/")+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
func lines(suffix string) ([]string, error) {
j, err := Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func InstanceAttributeValue(attr string) (string, error) {
return Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func ProjectAttributeValue(attr string) (string, error) {
return Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
func strsContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
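
To round out the metadata.go changes, a hedged sketch of the Subscribe API shown earlier in this file. "instance/maintenance-event" is just one plausible suffix to watch, and the handler logic is illustrative:

package main

import (
    "log"

    "cloud.google.com/go/compute/metadata"
)

func main() {
    // Subscribe blocks until the callback returns a non-nil error or the
    // watched value is deleted.
    err := metadata.Subscribe("instance/maintenance-event", func(v string, ok bool) error {
        if !ok {
            log.Println("value deleted")
            return nil
        }
        log.Printf("maintenance event: %q", v)
        return nil // keep watching
    })
    if err != nil {
        log.Fatal(err)
    }
}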

vendor/cloud.google.com/go/iam/iam.go

@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -22,9 +22,13 @@
package iam
import (
"time"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
pb "google.golang.org/genproto/googleapis/iam/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// client abstracts the IAMPolicy API to allow multiple implementations.
@ -39,26 +43,50 @@ type grpcClient struct {
c pb.IAMPolicyClient
}
var withRetry = gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60 * time.Second,
Multiplier: 1.3,
})
})
func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
proto, err := g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
var proto *pb.Policy
err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
var err error
proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
return err
}, withRetry)
if err != nil {
return nil, err
}
return proto, nil
}
func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
Resource: resource,
Policy: p,
})
return err
return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
Resource: resource,
Policy: p,
})
return err
}, withRetry)
}
func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
res, err := g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
Resource: resource,
Permissions: perms,
})
var res *pb.TestIamPermissionsResponse
err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
var err error
res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
Resource: resource,
Permissions: perms,
})
return err
}, withRetry)
if err != nil {
return nil, err
}
@ -76,7 +104,15 @@ type Handle struct {
// InternalNewHandle returns a Handle for resource.
// The conn parameter refers to a server that must support the IAMPolicy service.
func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle {
return InternalNewHandleClient(&grpcClient{c: pb.NewIAMPolicyClient(conn)}, resource)
return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource)
}
// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only.
//
// InternalNewHandleClient returns a Handle for resource using the given
// grpc service that implements IAM as a mixin
func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle {
return InternalNewHandleClient(&grpcClient{c: c}, resource)
}
// InternalNewHandleClient is for use by the Google Cloud Libraries only.
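
The gax.Invoke/gax.WithRetry wiring introduced in this file follows the usual gax-go pattern. Below is a standalone, hedged sketch of that pattern around a made-up call: fakeRPC, the attempt counter, and the backoff values are assumptions for illustration, not this package's API.

package main

import (
    "context"
    "fmt"
    "time"

    gax "github.com/googleapis/gax-go"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// fakeRPC stands in for a unary gRPC call; it fails twice with UNAVAILABLE
// before succeeding, to give the retryer something to do.
func fakeRPC(calls *int) error {
    *calls++
    if *calls < 3 {
        return status.Error(codes.Unavailable, "try again")
    }
    return nil
}

func main() {
    withRetry := gax.WithRetry(func() gax.Retryer {
        return gax.OnCodes([]codes.Code{
            codes.DeadlineExceeded,
            codes.Unavailable,
        }, gax.Backoff{
            Initial:    100 * time.Millisecond,
            Max:        60 * time.Second,
            Multiplier: 1.3,
        })
    })

    var calls int
    err := gax.Invoke(context.Background(), func(ctx context.Context, _ gax.CallSettings) error {
        return fakeRPC(&calls)
    }, withRetry)
    if err != nil {
        fmt.Println("failed:", err)
        return
    }
    fmt.Printf("succeeded after %d attempts\n", calls)
}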

vendor/cloud.google.com/go/internal/annotate.go (new file, generated, vendored; 54 lines)

@ -0,0 +1,54 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"fmt"
"google.golang.org/api/googleapi"
"google.golang.org/grpc/status"
)
// Annotate prepends msg to the error message in err, attempting
// to preserve other information in err, like an error code.
//
// Annotate panics if err is nil.
//
// Annotate knows about these error types:
// - "google.golang.org/grpc/status".Status
// - "google.golang.org/api/googleapi".Error
// If the error is not one of these types, Annotate behaves
// like
// fmt.Errorf("%s: %v", msg, err)
func Annotate(err error, msg string) error {
if err == nil {
panic("Annotate called with nil")
}
if s, ok := status.FromError(err); ok {
p := s.Proto()
p.Message = msg + ": " + p.Message
return status.ErrorProto(p)
}
if g, ok := err.(*googleapi.Error); ok {
g.Message = msg + ": " + g.Message
return g
}
return fmt.Errorf("%s: %v", msg, err)
}
// Annotatef uses format and args to format a string, then calls Annotate.
func Annotatef(err error, format string, args ...interface{}) error {
return Annotate(err, fmt.Sprintf(format, args...))
}
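
cloud.google.com/go/internal is not importable from outside that module, so here is a hedged, standalone illustration of the annotation technique used above for the gRPC case. The annotate function is a local copy of the status-rewriting logic for this sketch, not the vendored helper itself:

package main

import (
    "fmt"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// annotate prepends msg to a gRPC status error while preserving its code,
// mirroring the gRPC branch of Annotate above.
func annotate(err error, msg string) error {
    if s, ok := status.FromError(err); ok {
        p := s.Proto()
        p.Message = msg + ": " + p.Message
        return status.ErrorProto(p)
    }
    return fmt.Errorf("%s: %v", msg, err)
}

func main() {
    err := status.Error(codes.NotFound, "object missing")
    wrapped := annotate(err, "storage: reading attrs")

    // The gRPC code survives the annotation, which is the point of the helper.
    fmt.Println(status.Code(wrapped)) // NotFound
    fmt.Println(wrapped)
}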

(vendored file, filename not captured)

@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

vendor/cloud.google.com/go/internal/retry.go

@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,7 +15,6 @@
package internal
import (
"fmt"
"time"
gax "github.com/googleapis/gax-go"
@ -48,7 +47,7 @@ func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
p := bo.Pause()
if cerr := sleep(ctx, p); cerr != nil {
if lastErr != nil {
return fmt.Errorf("%v; last function err: %v", cerr, lastErr)
return Annotatef(lastErr, "retry failed with %v; last error", cerr)
}
return cerr
}

vendor/cloud.google.com/go/internal/trace/go18.go (new file, generated, vendored; 83 lines)

@ -0,0 +1,83 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
package trace
import (
"go.opencensus.io/trace"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
"google.golang.org/genproto/googleapis/rpc/code"
"google.golang.org/grpc/status"
)
func StartSpan(ctx context.Context, name string) context.Context {
ctx, _ = trace.StartSpan(ctx, name)
return ctx
}
func EndSpan(ctx context.Context, err error) {
span := trace.FromContext(ctx)
if err != nil {
span.SetStatus(toStatus(err))
}
span.End()
}
// ToStatus interrogates an error and converts it to an appropriate
// OpenCensus status.
func toStatus(err error) trace.Status {
if err2, ok := err.(*googleapi.Error); ok {
return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message}
} else if s, ok := status.FromError(err); ok {
return trace.Status{Code: int32(s.Code()), Message: s.Message()}
} else {
return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()}
}
}
// TODO (deklerk): switch to using OpenCensus function when it becomes available.
// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
func httpStatusCodeToOCCode(httpStatusCode int) int32 {
switch httpStatusCode {
case 200:
return int32(code.Code_OK)
case 499:
return int32(code.Code_CANCELLED)
case 500:
return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS
case 400:
return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE
case 504:
return int32(code.Code_DEADLINE_EXCEEDED)
case 404:
return int32(code.Code_NOT_FOUND)
case 409:
return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED
case 403:
return int32(code.Code_PERMISSION_DENIED)
case 401:
return int32(code.Code_UNAUTHENTICATED)
case 429:
return int32(code.Code_RESOURCE_EXHAUSTED)
case 501:
return int32(code.Code_UNIMPLEMENTED)
case 503:
return int32(code.Code_UNAVAILABLE)
default:
return int32(code.Code_UNKNOWN)
}
}
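
The StartSpan/EndSpan helpers above are thin wrappers over go.opencensus.io/trace. A hedged sketch of the same wrap-and-record-status pattern applied directly to an illustrative function (doWork and its simulated failure are made up):

package main

import (
    "context"
    "errors"
    "fmt"

    octrace "go.opencensus.io/trace"
    "google.golang.org/genproto/googleapis/rpc/code"
)

// doWork stands in for an API call wrapped by a span; the span name and the
// returned error are illustrative only.
func doWork(ctx context.Context) (err error) {
    ctx, span := octrace.StartSpan(ctx, "example.com/doWork")
    defer func() {
        if err != nil {
            span.SetStatus(octrace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()})
        }
        span.End()
    }()
    _ = ctx // ctx would normally be passed to the downstream call
    return errors.New("simulated failure")
}

func main() {
    fmt.Println(doWork(context.Background()))
}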

vendor/cloud.google.com/go/internal/trace/not_go18.go (new file, generated, vendored; 30 lines)

@ -0,0 +1,30 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.8
package trace
import (
"golang.org/x/net/context"
)
// OpenCensus only supports go 1.8 and higher.
func StartSpan(ctx context.Context, _ string) context.Context {
return ctx
}
func EndSpan(context.Context, error) {
}

vendor/cloud.google.com/go/internal/version/version.go

@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -26,7 +26,7 @@ import (
// Repo is the current version of the client libraries in this
// repo. It should be a date in YYYYMMDD format.
const Repo = "20170928"
const Repo = "20180226"
// Go returns the Go runtime version. The returned string
// has no whitespace.

vendor/cloud.google.com/go/storage/acl.go

@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved.
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,10 +15,10 @@
package storage
import (
"fmt"
"net/http"
"reflect"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
raw "google.golang.org/api/storage/v1"
@ -48,10 +48,21 @@ const (
AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
)
// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket.
// ACLRule represents a grant for a role to an entity (user, group or team) for a
// Google Cloud Storage object or bucket.
type ACLRule struct {
Entity ACLEntity
Role ACLRole
Entity ACLEntity
EntityID string
Role ACLRole
Domain string
Email string
ProjectTeam *ProjectTeam
}
// ProjectTeam is the project team associated with the entity, if any.
type ProjectTeam struct {
ProjectNumber string
Team string
}
// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
@ -64,7 +75,10 @@ type ACLHandle struct {
}
// Delete permanently deletes the ACL entry for the given entity.
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error {
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete")
defer func() { trace.EndSpan(ctx, err) }()
if a.object != "" {
return a.objectDelete(ctx, entity)
}
@ -74,8 +88,11 @@ func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error {
return a.bucketDelete(ctx, entity)
}
// Set sets the permission level for the given entity.
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error {
// Set sets the role for the given entity.
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set")
defer func() { trace.EndSpan(ctx, err) }()
if a.object != "" {
return a.objectSet(ctx, entity, role, false)
}
@ -86,7 +103,10 @@ func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) err
}
// List retrieves ACL entries.
func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) {
func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List")
defer func() { trace.EndSpan(ctx, err) }()
if a.object != "" {
return a.objectList(ctx)
}
@ -106,21 +126,17 @@ func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
return err
})
if err != nil {
return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", a.bucket, err)
return nil, err
}
return toACLRules(acls.Items), nil
return toObjectACLRules(acls.Items), nil
}
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
err := runWithRetry(ctx, func() error {
return runWithRetry(ctx, func() error {
req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
a.configureCall(req, ctx)
return req.Do()
})
if err != nil {
return fmt.Errorf("storage: error deleting default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
}
return nil
}
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
@ -133,14 +149,9 @@ func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
return err
})
if err != nil {
return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", a.bucket, err)
return nil, err
}
r := make([]ACLRule, len(acls.Items))
for i, v := range acls.Items {
r[i].Entity = ACLEntity(v.Entity)
r[i].Role = ACLRole(v.Role)
}
return r, nil
return toBucketACLRules(acls.Items), nil
}
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
@ -156,21 +167,17 @@ func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRol
return err
})
if err != nil {
return fmt.Errorf("storage: error updating bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
return err
}
return nil
}
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
err := runWithRetry(ctx, func() error {
return runWithRetry(ctx, func() error {
req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
a.configureCall(req, ctx)
return req.Do()
})
if err != nil {
return fmt.Errorf("storage: error deleting bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
}
return nil
}
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
@ -183,9 +190,9 @@ func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
return err
})
if err != nil {
return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", a.bucket, a.object, err)
return nil, err
}
return toACLRules(acls.Items), nil
return toObjectACLRules(acls.Items), nil
}
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
@ -206,35 +213,21 @@ func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRol
req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
}
a.configureCall(req, ctx)
err := runWithRetry(ctx, func() error {
return runWithRetry(ctx, func() error {
_, err := req.Do()
return err
})
if err != nil {
if isBucketDefault {
return fmt.Errorf("storage: error updating default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
} else {
return fmt.Errorf("storage: error updating object ACL entry for bucket %q, object %q, entity %q: %v", a.bucket, a.object, entity, err)
}
}
return nil
}
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
err := runWithRetry(ctx, func() error {
return runWithRetry(ctx, func() error {
req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
a.configureCall(req, ctx)
return req.Do()
})
if err != nil {
return fmt.Errorf("storage: error deleting object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err)
}
return nil
}
func (a *ACLHandle) configureCall(call interface {
Header() http.Header
}, ctx context.Context) {
func (a *ACLHandle) configureCall(call interface{ Header() http.Header }, ctx context.Context) {
vc := reflect.ValueOf(call)
vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
if a.userProject != "" {
@ -243,10 +236,100 @@ func (a *ACLHandle) configureCall(call interface {
setClientHeader(call.Header())
}
func toACLRules(items []*raw.ObjectAccessControl) []ACLRule {
r := make([]ACLRule, 0, len(items))
func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule {
var rs []ACLRule
for _, item := range items {
r = append(r, ACLRule{Entity: ACLEntity(item.Entity), Role: ACLRole(item.Role)})
rs = append(rs, toObjectACLRule(item))
}
return rs
}
func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule {
var rs []ACLRule
for _, item := range items {
rs = append(rs, toBucketACLRule(item))
}
return rs
}
func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule {
return ACLRule{
Entity: ACLEntity(a.Entity),
EntityID: a.EntityId,
Role: ACLRole(a.Role),
Domain: a.Domain,
Email: a.Email,
ProjectTeam: toObjectProjectTeam(a.ProjectTeam),
}
}
func toBucketACLRule(a *raw.BucketAccessControl) ACLRule {
return ACLRule{
Entity: ACLEntity(a.Entity),
EntityID: a.EntityId,
Role: ACLRole(a.Role),
Domain: a.Domain,
Email: a.Email,
ProjectTeam: toBucketProjectTeam(a.ProjectTeam),
}
}
func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl {
if len(rules) == 0 {
return nil
}
r := make([]*raw.ObjectAccessControl, 0, len(rules))
for _, rule := range rules {
r = append(r, rule.toRawObjectAccessControl("")) // bucket name unnecessary
}
return r
}
func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl {
if len(rules) == 0 {
return nil
}
r := make([]*raw.BucketAccessControl, 0, len(rules))
for _, rule := range rules {
r = append(r, rule.toRawBucketAccessControl("")) // bucket name unnecessary
}
return r
}
func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl {
return &raw.BucketAccessControl{
Bucket: bucket,
Entity: string(r.Entity),
Role: string(r.Role),
// The other fields are not settable.
}
}
func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessControl {
return &raw.ObjectAccessControl{
Bucket: bucket,
Entity: string(r.Entity),
Role: string(r.Role),
// The other fields are not settable.
}
}
func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam {
if p == nil {
return nil
}
return &ProjectTeam{
ProjectNumber: p.ProjectNumber,
Team: p.Team,
}
}
func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam {
if p == nil {
return nil
}
return &ProjectTeam{
ProjectNumber: p.ProjectNumber,
Team: p.Team,
}
}
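
For context on the expanded ACLRule (EntityID, Domain, Email, ProjectTeam), a hedged sketch of setting and listing ACLs through the storage client. It assumes application default credentials, and the bucket name is a placeholder:

package main

import (
    "context"
    "fmt"
    "log"

    "cloud.google.com/go/storage"
)

func main() {
    ctx := context.Background()
    client, err := storage.NewClient(ctx)
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    // "my-example-bucket" is a placeholder for a bucket the caller owns.
    acl := client.Bucket("my-example-bucket").ACL()

    if err := acl.Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
        log.Fatal(err)
    }
    rules, err := acl.List(ctx)
    if err != nil {
        log.Fatal(err)
    }
    for _, r := range rules {
        // Email, Domain and ProjectTeam are the fields added in this version.
        fmt.Println(r.Entity, r.Role, r.Email, r.Domain, r.ProjectTeam)
    }
}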

vendor/cloud.google.com/go/storage/bucket.go

@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. LiveAndArchived Rights Reserved.
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -21,6 +21,7 @@ import (
"time"
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
@ -35,7 +36,7 @@ type BucketHandle struct {
acl ACLHandle
defaultObjectACL ACLHandle
conds *BucketConditions
userProject string // project for requester-pays buckets
userProject string // project for Requester Pays buckets
}
// Bucket returns a BucketHandle, which provides operations on the named bucket.
@ -63,7 +64,10 @@ func (c *Client) Bucket(name string) *BucketHandle {
// Create creates the Bucket in the project.
// If attrs is nil the API defaults will be used.
func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) error {
func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
defer func() { trace.EndSpan(ctx, err) }()
var bkt *raw.Bucket
if attrs != nil {
bkt = attrs.toRawBucket()
@ -78,11 +82,20 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
}
req := b.c.raw.Buckets.Insert(projectID, bkt)
setClientHeader(req.Header())
if attrs != nil && attrs.PredefinedACL != "" {
req.PredefinedAcl(attrs.PredefinedACL)
}
if attrs != nil && attrs.PredefinedDefaultObjectACL != "" {
req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL)
}
return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err })
}
// Delete deletes the Bucket.
func (b *BucketHandle) Delete(ctx context.Context) error {
func (b *BucketHandle) Delete(ctx context.Context) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete")
defer func() { trace.EndSpan(ctx, err) }()
req, err := b.newDeleteCall()
if err != nil {
return err
@ -139,7 +152,10 @@ func (b *BucketHandle) Object(name string) *ObjectHandle {
}
// Attrs returns the metadata for the bucket.
func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) {
func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs")
defer func() { trace.EndSpan(ctx, err) }()
req, err := b.newGetCall()
if err != nil {
return nil, err
@ -155,7 +171,7 @@ func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) {
if err != nil {
return nil, err
}
return newBucket(resp), nil
return newBucket(resp)
}
func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) {
@ -170,17 +186,26 @@ func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) {
return req, nil
}
func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (*BucketAttrs, error) {
func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (attrs *BucketAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
defer func() { trace.EndSpan(ctx, err) }()
req, err := b.newPatchCall(&uattrs)
if err != nil {
return nil, err
}
if uattrs.PredefinedACL != "" {
req.PredefinedAcl(uattrs.PredefinedACL)
}
if uattrs.PredefinedDefaultObjectACL != "" {
req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL)
}
// TODO(jba): retry iff metagen is set?
rb, err := req.Context(ctx).Do()
if err != nil {
return nil, err
}
return newBucket(rb), nil
return newBucket(rb)
}
func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) {
@ -210,6 +235,20 @@ type BucketAttrs struct {
// apply to new objects when no object ACL is provided.
DefaultObjectACL []ACLRule
// If not empty, applies a predefined set of access controls. It should be set
// only when creating a bucket.
// It is always empty for BucketAttrs returned from the service.
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
// for valid values.
PredefinedACL string
// If not empty, applies a predefined set of default object access controls.
// It should be set only when creating a bucket.
// It is always empty for BucketAttrs returned from the service.
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
// for valid values.
PredefinedDefaultObjectACL string
// Location is the location of the bucket. It defaults to "US".
Location string
@ -237,9 +276,34 @@ type BucketAttrs struct {
Labels map[string]string
// RequesterPays reports whether the bucket is a Requester Pays bucket.
// Clients performing operations on Requester Pays buckets must provide
// a user project (see BucketHandle.UserProject), which will be billed
// for the operations.
RequesterPays bool
// Lifecycle is the lifecycle configuration for objects in the bucket.
Lifecycle Lifecycle
// Retention policy enforces a minimum retention time for all objects
// contained in the bucket. A RetentionPolicy of nil implies the bucket
// has no minimum data retention.
//
// This feature is in private alpha release. It is not currently available to
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
RetentionPolicy *RetentionPolicy
// The bucket's Cross-Origin Resource Sharing (CORS) configuration.
CORS []CORS
// The encryption configuration used by default for newly inserted objects.
Encryption *BucketEncryption
// The logging configuration.
Logging *BucketLogging
// The website configuration.
Website *BucketWebsite
}
// Lifecycle is the lifecycle configuration for objects in the bucket.
@ -247,12 +311,37 @@ type Lifecycle struct {
Rules []LifecycleRule
}
// Retention policy enforces a minimum retention time for all objects
// contained in the bucket.
//
// Any attempt to overwrite or delete objects younger than the retention
// period will result in an error. An unlocked retention policy can be
// modified or removed from the bucket via the Update method. A
// locked retention policy cannot be removed or shortened in duration
// for the lifetime of the bucket.
//
// This feature is in private alpha release. It is not currently available to
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
type RetentionPolicy struct {
// RetentionPeriod specifies the duration that objects need to be
// retained. Retention duration must be greater than zero and less than
// 100 years. Note that enforcement of retention periods less than a day
// is not guaranteed. Such periods should only be used for testing
// purposes.
RetentionPeriod time.Duration
// EffectiveTime is the time from which the policy was enforced and
// effective. This field is read-only.
EffectiveTime time.Time
}
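
A hedged sketch of creating a bucket with the new PredefinedACL and RetentionPolicy attributes described above. The project ID, bucket name, predefined-ACL value, and retention period are placeholders, and (per the comment above) RetentionPolicy was an alpha feature at this version:

package main

import (
    "context"
    "log"
    "time"

    "cloud.google.com/go/storage"
)

func main() {
    ctx := context.Background()
    client, err := storage.NewClient(ctx)
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    attrs := &storage.BucketAttrs{
        Location:      "US",
        PredefinedACL: "projectPrivate", // one of the JSON API's predefined ACL names
        RetentionPolicy: &storage.RetentionPolicy{
            RetentionPeriod: 24 * time.Hour,
        },
    }
    // "my-example-bucket" and "my-project" are placeholders.
    if err := client.Bucket("my-example-bucket").Create(ctx, "my-project", attrs); err != nil {
        log.Fatal(err)
    }
}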
const (
// RFC3339 date with only the date segment, used for CreatedBefore in LifecycleRule.
rfc3339Date = "2006-01-02"
// DeleteAction is a lifecycle action that deletes a live and/or archived
// objects. Takes precendence over SetStorageClass actions.
// objects. Takes precedence over SetStorageClass actions.
DeleteAction = "Delete"
// SetStorageClassAction changes the storage class of live and/or archived
@ -332,53 +421,64 @@ type LifecycleCondition struct {
NumNewerVersions int64
}
func newBucket(b *raw.Bucket) *BucketAttrs {
// BucketLogging holds the bucket's logging configuration, which defines the
// destination bucket and optional name prefix for the current bucket's
// logs.
type BucketLogging struct {
// The destination bucket where the current bucket's logs
// should be placed.
LogBucket string
// A prefix for log object names.
LogObjectPrefix string
}
// Website holds the bucket's website configuration, controlling how the
// service behaves when accessing bucket contents as a web site. See
// https://cloud.google.com/storage/docs/static-website for more information.
type BucketWebsite struct {
// If the requested object path is missing, the service will ensure the path has
// a trailing '/', append this suffix, and attempt to retrieve the resulting
// object. This allows the creation of index.html objects to represent directory
// pages.
MainPageSuffix string
// If the requested object path is missing, and any mainPageSuffix object is
// missing, if applicable, the service will return the named object from this
// bucket as the content for a 404 Not Found result.
NotFoundPage string
}
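// [Editorial sketch, not part of this diff] Configuring access logging and static-website
// behavior at bucket creation time, using the BucketLogging and BucketWebsite types above.
// All names are placeholders.
func createWebsiteBucket(ctx context.Context, client *storage.Client) error {
    attrs := &storage.BucketAttrs{
        Logging: &storage.BucketLogging{LogBucket: "example-logs", LogObjectPrefix: "www/"},
        Website: &storage.BucketWebsite{MainPageSuffix: "index.html", NotFoundPage: "404.html"},
    }
    return client.Bucket("www-example-bucket").Create(ctx, "example-project", attrs)
}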
func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
if b == nil {
return nil
return nil, nil
}
bucket := &BucketAttrs{
rp, err := toRetentionPolicy(b.RetentionPolicy)
if err != nil {
return nil, err
}
return &BucketAttrs{
Name: b.Name,
Location: b.Location,
MetaGeneration: b.Metageneration,
StorageClass: b.StorageClass,
Created: convertTime(b.TimeCreated),
VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled,
ACL: toBucketACLRules(b.Acl),
DefaultObjectACL: toObjectACLRules(b.DefaultObjectAcl),
Labels: b.Labels,
RequesterPays: b.Billing != nil && b.Billing.RequesterPays,
Lifecycle: toLifecycle(b.Lifecycle),
}
acl := make([]ACLRule, len(b.Acl))
for i, rule := range b.Acl {
acl[i] = ACLRule{
Entity: ACLEntity(rule.Entity),
Role: ACLRole(rule.Role),
}
}
bucket.ACL = acl
objACL := make([]ACLRule, len(b.DefaultObjectAcl))
for i, rule := range b.DefaultObjectAcl {
objACL[i] = ACLRule{
Entity: ACLEntity(rule.Entity),
Role: ACLRole(rule.Role),
}
}
bucket.DefaultObjectACL = objACL
return bucket
RetentionPolicy: rp,
CORS: toCORS(b.Cors),
Encryption: toBucketEncryption(b.Encryption),
Logging: toBucketLogging(b.Logging),
Website: toBucketWebsite(b.Website),
}, nil
}
// toRawBucket copies the editable attribute from b to the raw library's Bucket type.
func (b *BucketAttrs) toRawBucket() *raw.Bucket {
var acl []*raw.BucketAccessControl
if len(b.ACL) > 0 {
acl = make([]*raw.BucketAccessControl, len(b.ACL))
for i, rule := range b.ACL {
acl[i] = &raw.BucketAccessControl{
Entity: string(rule.Entity),
Role: string(rule.Role),
}
}
}
dACL := toRawObjectACL(b.DefaultObjectACL)
// Copy label map.
var labels map[string]string
if len(b.Labels) > 0 {
@ -400,24 +500,94 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
}
return &raw.Bucket{
Name: b.Name,
DefaultObjectAcl: dACL,
Location: b.Location,
StorageClass: b.StorageClass,
Acl: acl,
Acl: toRawBucketACL(b.ACL),
DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL),
Versioning: v,
Labels: labels,
Billing: bb,
Lifecycle: toRawLifecycle(b.Lifecycle),
RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(),
Cors: toRawCORS(b.CORS),
Encryption: b.Encryption.toRawBucketEncryption(),
Logging: b.Logging.toRawBucketLogging(),
Website: b.Website.toRawBucketWebsite(),
}
}
// CORS is the bucket's Cross-Origin Resource Sharing (CORS) configuration.
type CORS struct {
// MaxAge is the value to return in the Access-Control-Max-Age
// header used in preflight responses.
MaxAge time.Duration
// Methods is the list of HTTP methods on which to include CORS response
// headers (GET, OPTIONS, POST, etc.). Note: "*" is permitted in the list

// of methods, and means "any method".
Methods []string
// Origins is the list of Origins eligible to receive CORS response
// headers. Note: "*" is permitted in the list of origins, and means
// "any Origin".
Origins []string
// ResponseHeaders is the list of HTTP headers other than the simple
// response headers to give permission for the user-agent to share
// across domains.
ResponseHeaders []string
}
// BucketEncryption is a bucket's encryption configuration.
type BucketEncryption struct {
// A Cloud KMS key name, in the form
// projects/P/locations/L/keyRings/R/cryptoKeys/K, that will be used to encrypt
// objects inserted into this bucket, if no encryption method is specified.
// The key's location must be the same as the bucket's.
DefaultKMSKeyName string
}
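// [Editorial sketch, not part of this diff] Setting a CORS rule and a default Cloud KMS key
// on a new bucket, using the CORS and BucketEncryption types above. The origins and key name
// are placeholders; the KMS key's location must match the bucket's.
func createBucketWithCORSAndKMS(ctx context.Context, client *storage.Client) error {
    attrs := &storage.BucketAttrs{
        CORS: []storage.CORS{{
            MaxAge:          time.Hour,
            Methods:         []string{"GET", "HEAD"},
            Origins:         []string{"https://example.com"},
            ResponseHeaders: []string{"Content-Type"},
        }},
        Encryption: &storage.BucketEncryption{
            DefaultKMSKeyName: "projects/example-project/locations/us/keyRings/ring/cryptoKeys/key",
        },
    }
    return client.Bucket("example-bucket").Create(ctx, "example-project", attrs)
}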
type BucketAttrsToUpdate struct {
// VersioningEnabled, if set, updates whether the bucket uses versioning.
// If set, updates whether the bucket uses versioning.
VersioningEnabled optional.Bool
// RequesterPays, if set, updates whether the bucket is a Requester Pays bucket.
// If set, updates whether the bucket is a Requester Pays bucket.
RequesterPays optional.Bool
// If set, updates the retention policy of the bucket. Using
// RetentionPolicy.RetentionPeriod = 0 will delete the existing policy.
//
// This feature is in private alpha release. It is not currently available to
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
RetentionPolicy *RetentionPolicy
// If set, replaces the CORS configuration with a new configuration.
// An empty (rather than nil) slice causes all CORS policies to be removed.
CORS []CORS
// If set, replaces the encryption configuration of the bucket. Using
// BucketEncryption.DefaultKMSKeyName = "" will delete the existing
// configuration.
Encryption *BucketEncryption
// If set, replaces the lifecycle configuration of the bucket.
Lifecycle *Lifecycle
// If set, replaces the logging configuration of the bucket.
Logging *BucketLogging
// If set, replaces the website configuration of the bucket.
Website *BucketWebsite
// If not empty, applies a predefined set of access controls.
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch.
PredefinedACL string
// If not empty, applies a predefined set of default object access controls.
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch.
PredefinedDefaultObjectACL string
setLabels map[string]string
deleteLabels map[string]bool
}
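// [Editorial sketch, not part of this diff] Typical use of BucketAttrsToUpdate with
// BucketHandle.Update: enable Requester Pays, replace the lifecycle rules, and remove the
// website configuration (an empty BucketWebsite deletes it, as documented above).
func updateBucket(ctx context.Context, bkt *storage.BucketHandle) (*storage.BucketAttrs, error) {
    return bkt.Update(ctx, storage.BucketAttrsToUpdate{
        RequesterPays: true,
        Lifecycle: &storage.Lifecycle{Rules: []storage.LifecycleRule{{
            Action:    storage.LifecycleAction{Type: storage.DeleteAction},
            Condition: storage.LifecycleCondition{AgeInDays: 365},
        }}},
        Website: &storage.BucketWebsite{}, // clears the existing website configuration
    })
}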
@ -442,6 +612,18 @@ func (ua *BucketAttrsToUpdate) DeleteLabel(name string) {
func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
rb := &raw.Bucket{}
if ua.CORS != nil {
rb.Cors = toRawCORS(ua.CORS)
rb.ForceSendFields = append(rb.ForceSendFields, "Cors")
}
if ua.RetentionPolicy != nil {
if ua.RetentionPolicy.RetentionPeriod == 0 {
rb.NullFields = append(rb.NullFields, "RetentionPolicy")
rb.RetentionPolicy = nil
} else {
rb.RetentionPolicy = ua.RetentionPolicy.toRawRetentionPolicy()
}
}
if ua.VersioningEnabled != nil {
rb.Versioning = &raw.BucketVersioning{
Enabled: optional.ToBool(ua.VersioningEnabled),
@ -454,6 +636,43 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
ForceSendFields: []string{"RequesterPays"},
}
}
if ua.Encryption != nil {
if ua.Encryption.DefaultKMSKeyName == "" {
rb.NullFields = append(rb.NullFields, "Encryption")
rb.Encryption = nil
} else {
rb.Encryption = ua.Encryption.toRawBucketEncryption()
}
}
if ua.Lifecycle != nil {
rb.Lifecycle = toRawLifecycle(*ua.Lifecycle)
}
if ua.Logging != nil {
if *ua.Logging == (BucketLogging{}) {
rb.NullFields = append(rb.NullFields, "Logging")
rb.Logging = nil
} else {
rb.Logging = ua.Logging.toRawBucketLogging()
}
}
if ua.Website != nil {
if *ua.Website == (BucketWebsite{}) {
rb.NullFields = append(rb.NullFields, "Website")
rb.Website = nil
} else {
rb.Website = ua.Website.toRawBucketWebsite()
}
}
if ua.PredefinedACL != "" {
// Clear ACL or the call will fail.
rb.Acl = nil
rb.ForceSendFields = append(rb.ForceSendFields, "Acl")
}
if ua.PredefinedDefaultObjectACL != "" {
// Clear ACLs or the call will fail.
rb.DefaultObjectAcl = nil
rb.ForceSendFields = append(rb.ForceSendFields, "DefaultObjectAcl")
}
if ua.setLabels != nil || ua.deleteLabels != nil {
rb.Labels = map[string]string{}
for k, v := range ua.setLabels {
@ -471,7 +690,7 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
// If returns a new BucketHandle that applies a set of preconditions.
// Preconditions already set on the BucketHandle are ignored.
// Operations on the new handle will only occur if the preconditions are
// Operations on the new handle will return an error if the preconditions are not
// satisfied. The only valid preconditions for buckets are MetagenerationMatch
// and MetagenerationNotMatch.
func (b *BucketHandle) If(conds BucketConditions) *BucketHandle {
@ -506,8 +725,10 @@ func (c *BucketConditions) validate(method string) error {
}
// UserProject returns a new BucketHandle that passes the project ID as the user
// project for all subsequent calls. A user project is required for all operations
// on requester-pays buckets.
// project for all subsequent calls. Calls with a user project will be billed to that
// project rather than to the bucket's owning project.
//
// A user project is required for all operations on Requester Pays buckets.
func (b *BucketHandle) UserProject(projectID string) *BucketHandle {
b2 := *b
b2.userProject = projectID
@ -516,6 +737,25 @@ func (b *BucketHandle) UserProject(projectID string) *BucketHandle {
return &b2
}
// LockRetentionPolicy locks a bucket's retention policy until a previously-configured
// RetentionPeriod past the EffectiveTime. Note that if RetentionPeriod is set to less
// than a day, the retention policy is treated as a development configuration and locking
// will have no effect. The BucketHandle must have a metageneration condition that
// matches the bucket's metageneration. See BucketHandle.If.
//
// This feature is in private alpha release. It is not currently available to
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error {
var metageneration int64
if b.conds != nil {
metageneration = b.conds.MetagenerationMatch
}
req := b.c.raw.Buckets.LockRetentionPolicy(b.name, metageneration)
_, err := req.Context(ctx).Do()
return err
}
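// [Editorial sketch, not part of this diff] Locking a retention policy as the doc comment
// above requires: read the bucket's attributes, then call LockRetentionPolicy through a
// handle that carries a MetagenerationMatch condition.
func lockPolicy(ctx context.Context, bkt *storage.BucketHandle) error {
    attrs, err := bkt.Attrs(ctx)
    if err != nil {
        return err
    }
    return bkt.If(storage.BucketConditions{MetagenerationMatch: attrs.MetaGeneration}).LockRetentionPolicy(ctx)
}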
// applyBucketConds modifies the provided call using the conditions in conds.
// call is something that quacks like a *raw.WhateverCall.
func applyBucketConds(method string, conds *BucketConditions, call interface{}) error {
@ -539,6 +779,55 @@ func applyBucketConds(method string, conds *BucketConditions, call interface{})
return nil
}
func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy {
if rp == nil {
return nil
}
return &raw.BucketRetentionPolicy{
RetentionPeriod: int64(rp.RetentionPeriod / time.Second),
}
}
func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) {
if rp == nil {
return nil, nil
}
t, err := time.Parse(time.RFC3339, rp.EffectiveTime)
if err != nil {
return nil, err
}
return &RetentionPolicy{
RetentionPeriod: time.Duration(rp.RetentionPeriod) * time.Second,
EffectiveTime: t,
}, nil
}
func toRawCORS(c []CORS) []*raw.BucketCors {
var out []*raw.BucketCors
for _, v := range c {
out = append(out, &raw.BucketCors{
MaxAgeSeconds: int64(v.MaxAge / time.Second),
Method: v.Methods,
Origin: v.Origins,
ResponseHeader: v.ResponseHeaders,
})
}
return out
}
func toCORS(rc []*raw.BucketCors) []CORS {
var out []CORS
for _, v := range rc {
out = append(out, CORS{
MaxAge: time.Duration(v.MaxAgeSeconds) * time.Second,
Methods: v.Method,
Origins: v.Origin,
ResponseHeaders: v.ResponseHeader,
})
}
return out
}
func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
var rl raw.BucketLifecycle
if len(l.Rules) == 0 {
@ -604,10 +893,67 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
if rr.Condition.CreatedBefore != "" {
r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore)
}
l.Rules = append(l.Rules, r)
}
return l
}
func (e *BucketEncryption) toRawBucketEncryption() *raw.BucketEncryption {
if e == nil {
return nil
}
return &raw.BucketEncryption{
DefaultKmsKeyName: e.DefaultKMSKeyName,
}
}
func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption {
if e == nil {
return nil
}
return &BucketEncryption{DefaultKMSKeyName: e.DefaultKmsKeyName}
}
func (b *BucketLogging) toRawBucketLogging() *raw.BucketLogging {
if b == nil {
return nil
}
return &raw.BucketLogging{
LogBucket: b.LogBucket,
LogObjectPrefix: b.LogObjectPrefix,
}
}
func toBucketLogging(b *raw.BucketLogging) *BucketLogging {
if b == nil {
return nil
}
return &BucketLogging{
LogBucket: b.LogBucket,
LogObjectPrefix: b.LogObjectPrefix,
}
}
func (w *BucketWebsite) toRawBucketWebsite() *raw.BucketWebsite {
if w == nil {
return nil
}
return &raw.BucketWebsite{
MainPageSuffix: w.MainPageSuffix,
NotFoundPage: w.NotFoundPage,
}
}
func toBucketWebsite(w *raw.BucketWebsite) *BucketWebsite {
if w == nil {
return nil
}
return &BucketWebsite{
MainPageSuffix: w.MainPageSuffix,
NotFoundPage: w.NotFoundPage,
}
}
// Objects returns an iterator over the objects in the bucket that match the Query q.
// If q is nil, no filtering is done.
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
@ -689,8 +1035,6 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error)
return resp.NextPageToken, nil
}
// TODO(jbd): Add storage.buckets.update.
// Buckets returns an iterator over the buckets in the project. You may
// optionally set the iterator's Prefix field to restrict the list to buckets
// whose names begin with the prefix. By default, all buckets in the project
@ -736,7 +1080,7 @@ func (it *BucketIterator) Next() (*BucketAttrs, error) {
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error) {
func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, err error) {
req := it.client.raw.Buckets.List(it.projectID)
setClientHeader(req.Header())
req.Projection("full")
@ -746,7 +1090,6 @@ func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error)
req.MaxResults(int64(pageSize))
}
var resp *raw.Buckets
var err error
err = runWithRetry(it.ctx, func() error {
resp, err = req.Context(it.ctx).Do()
return err
@ -755,7 +1098,11 @@ func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error)
return "", err
}
for _, item := range resp.Items {
it.buckets = append(it.buckets, newBucket(item))
b, err := newBucket(item)
if err != nil {
return "", err
}
it.buckets = append(it.buckets, b)
}
return resp.NextPageToken, nil
}

View File

@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -18,6 +18,7 @@ import (
"errors"
"fmt"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
raw "google.golang.org/api/storage/v1"
)
@ -59,17 +60,32 @@ type Copier struct {
// ProgressFunc should return quickly without blocking.
ProgressFunc func(copiedBytes, totalBytes uint64)
// The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K,
// that will be used to encrypt the object. Overrides the object's KMSKeyName, if
// any.
//
// Providing both a DestinationKMSKeyName and a customer-supplied encryption key
// (via ObjectHandle.Key) on the destination object will result in an error when
// Run is called.
DestinationKMSKeyName string
dst, src *ObjectHandle
}
// Run performs the copy.
func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error) {
func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run")
defer func() { trace.EndSpan(ctx, err) }()
if err := c.src.validate(); err != nil {
return nil, err
}
if err := c.dst.validate(); err != nil {
return nil, err
}
if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil {
return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key")
}
// Convert destination attributes to raw form, omitting the bucket.
// If the bucket is included but name or content-type aren't, the service
// returns a 400 with "Required" as the only message. Omitting the bucket
@ -96,6 +112,12 @@ func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.Rewr
if c.RewriteToken != "" {
call.RewriteToken(c.RewriteToken)
}
if c.DestinationKMSKeyName != "" {
call.DestinationKmsKeyName(c.DestinationKMSKeyName)
}
if c.PredefinedACL != "" {
call.DestinationPredefinedAcl(c.PredefinedACL)
}
if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
return nil, err
}
@ -149,7 +171,10 @@ type Composer struct {
}
// Run performs the compose operation.
func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) {
func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run")
defer func() { trace.EndSpan(ctx, err) }()
if err := c.dst.validate(); err != nil {
return nil, err
}
@ -187,11 +212,13 @@ func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) {
if c.dst.userProject != "" {
call.UserProject(c.dst.userProject)
}
if c.PredefinedACL != "" {
call.DestinationPredefinedAcl(c.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
return nil, err
}
var obj *raw.Object
var err error
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if err != nil {

View File

@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -19,11 +19,14 @@ Google Cloud Storage stores data in named objects, which are grouped into bucket
More information about Google Cloud Storage is available at
https://cloud.google.com/storage/docs.
All of the methods of this package use exponential backoff to retry calls
that fail with certain errors, as described in
https://cloud.google.com/storage/docs/exponential-backoff.
See https://godoc.org/cloud.google.com/go for authentication, timeouts,
connection pooling and similar aspects of this package.
Note: This package is in beta. Some backwards-incompatible changes may occur.
All of the methods of this package use exponential backoff to retry calls that fail
with certain errors, as described in
https://cloud.google.com/storage/docs/exponential-backoff. Retrying continues
indefinitely unless the controlling context is canceled or the client is closed. See
context.WithTimeout and context.WithCancel.
Creating a Client
@ -36,6 +39,13 @@ To start working with this package, create a client:
// TODO: Handle error.
}
The client will use your default application credentials.
If you only wish to access public data, you can create
an unauthenticated client with
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
Buckets
A Google Cloud Storage bucket is a collection of objects. To work with a
@ -56,7 +66,7 @@ global across all projects.
Each bucket has associated metadata, represented in this package by
BucketAttrs. The third argument to BucketHandle.Create allows you to set
the intial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
Attrs:
attrs, err := bkt.Attrs(ctx)
@ -69,15 +79,16 @@ Attrs:
Objects
An object holds arbitrary data as a sequence of bytes, like a file. You
refer to objects using a handle, just as with buckets. You can use the
standard Go io.Reader and io.Writer interfaces to read and write
object data:
refer to objects using a handle, just as with buckets, but unlike buckets
you don't explicitly create an object. Instead, the first time you write
to an object it will be created. You can use the standard Go io.Reader
and io.Writer interfaces to read and write object data:
obj := bkt.Object("data")
// Write something to obj.
// w implements io.Writer.
w := obj.NewWriter(ctx)
// Write some text to obj. This will overwrite whatever is there.
// Write some text to obj. This will either create the object or overwrite whatever is there already.
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
// TODO: Handle error.
}
@ -152,10 +163,5 @@ SignedURL for details.
// TODO: Handle error.
}
fmt.Println(url)
Authentication
See examples of authorization and authentication at
https://godoc.org/cloud.google.com/go#pkg-examples.
*/
package storage // import "cloud.google.com/go/storage"

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -24,6 +24,8 @@ func shouldRetry(err error) bool {
// Retry on 429 and 5xx, according to
// https://cloud.google.com/storage/docs/exponential-backoff.
return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
case interface{ Temporary() bool }:
return e.Temporary()
default:
return false
}

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -24,3 +24,7 @@ import (
func withContext(r *http.Request, ctx context.Context) *http.Request {
return r.WithContext(ctx)
}
func goHTTPUncompressed(res *http.Response) bool {
return res.Uncompressed
}

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -16,6 +16,7 @@ package storage
import (
"cloud.google.com/go/iam"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
raw "google.golang.org/api/storage/v1"
iampb "google.golang.org/genproto/googleapis/iam/v1"
@ -23,21 +24,30 @@ import (
// IAM provides access to IAM access control for the bucket.
func (b *BucketHandle) IAM() *iam.Handle {
return iam.InternalNewHandleClient(&iamClient{raw: b.c.raw}, b.name)
return iam.InternalNewHandleClient(&iamClient{
raw: b.c.raw,
userProject: b.userProject,
}, b.name)
}
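// [Editorial sketch, not part of this diff] Granting a role through the iam.Handle returned
// by BucketHandle.IAM. Assumes "cloud.google.com/go/iam" is imported; the member string and
// role are placeholders chosen for illustration.
func grantObjectViewer(ctx context.Context, bkt *storage.BucketHandle) error {
    policy, err := bkt.IAM().Policy(ctx)
    if err != nil {
        return err
    }
    policy.Add("user:someone@example.com", iam.RoleName("roles/storage.objectViewer"))
    return bkt.IAM().SetPolicy(ctx, policy)
}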
// iamClient implements the iam.client interface.
type iamClient struct {
raw *raw.Service
raw *raw.Service
userProject string
}
func (c *iamClient) Get(ctx context.Context, resource string) (*iampb.Policy, error) {
req := c.raw.Buckets.GetIamPolicy(resource)
setClientHeader(req.Header())
func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get")
defer func() { trace.EndSpan(ctx, err) }()
call := c.raw.Buckets.GetIamPolicy(resource)
setClientHeader(call.Header())
if c.userProject != "" {
call.UserProject(c.userProject)
}
var rp *raw.Policy
var err error
err = runWithRetry(ctx, func() error {
rp, err = req.Context(ctx).Do()
rp, err = call.Context(ctx).Do()
return err
})
if err != nil {
@ -46,23 +56,34 @@ func (c *iamClient) Get(ctx context.Context, resource string) (*iampb.Policy, er
return iamFromStoragePolicy(rp), nil
}
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) error {
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set")
defer func() { trace.EndSpan(ctx, err) }()
rp := iamToStoragePolicy(p)
req := c.raw.Buckets.SetIamPolicy(resource, rp)
setClientHeader(req.Header())
call := c.raw.Buckets.SetIamPolicy(resource, rp)
setClientHeader(call.Header())
if c.userProject != "" {
call.UserProject(c.userProject)
}
return runWithRetry(ctx, func() error {
_, err := req.Context(ctx).Do()
_, err := call.Context(ctx).Do()
return err
})
}
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
req := c.raw.Buckets.TestIamPermissions(resource, perms)
setClientHeader(req.Header())
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test")
defer func() { trace.EndSpan(ctx, err) }()
call := c.raw.Buckets.TestIamPermissions(resource, perms)
setClientHeader(call.Header())
if c.userProject != "" {
call.UserProject(c.userProject)
}
var res *raw.TestIamPermissionsResponse
var err error
err = runWithRetry(ctx, func() error {
res, err = req.Context(ctx).Do()
res, err = call.Context(ctx).Do()
return err
})
if err != nil {

View File

@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved.
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -34,6 +34,8 @@ func shouldRetry(err error) bool {
// Unfortunately the error type is unexported, so we resort to string
// matching.
return strings.Contains(e.Error(), "REFUSED_STREAM")
case interface{ Temporary() bool }:
return e.Temporary()
default:
return false
}

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -24,3 +24,10 @@ func withContext(r *http.Request, _ interface{}) *http.Request {
// In Go 1.6 and below, ignore the context.
return r
}
// Go 1.6 doesn't have http.Response.Uncompressed, so we can't know whether the Go
// HTTP stack uncompressed a gzip file. As a good approximation, assume that
// the lack of a Content-Length header means that it did uncompress.
func goHTTPUncompressed(res *http.Response) bool {
return res.Header.Get("Content-Length") == ""
}

188
vendor/cloud.google.com/go/storage/notifications.go generated vendored Normal file
View File

@ -0,0 +1,188 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"errors"
"fmt"
"regexp"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
raw "google.golang.org/api/storage/v1"
)
// A Notification describes how to send Cloud PubSub messages when certain
// events occur in a bucket.
type Notification struct {
// The ID of the notification.
ID string
// The ID of the topic to which this subscription publishes.
TopicID string
// The ID of the project to which the topic belongs.
TopicProjectID string
// Only send notifications about listed event types. If empty, send notifications
// for all event types.
// See https://cloud.google.com/storage/docs/pubsub-notifications#events.
EventTypes []string
// If present, only apply this notification configuration to object names that
// begin with this prefix.
ObjectNamePrefix string
// An optional list of additional attributes to attach to each Cloud PubSub
// message published for this notification subscription.
CustomAttributes map[string]string
// The contents of the message payload.
// See https://cloud.google.com/storage/docs/pubsub-notifications#payload.
PayloadFormat string
}
// Values for Notification.PayloadFormat.
const (
// Send no payload with notification messages.
NoPayload = "NONE"
// Send object metadata as JSON with notification messages.
JSONPayload = "JSON_API_V1"
)
// Values for Notification.EventTypes.
const (
// Event that occurs when an object is successfully created.
ObjectFinalizeEvent = "OBJECT_FINALIZE"
// Event that occurs when the metadata of an existing object changes.
ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE"
// Event that occurs when an object is permanently deleted.
ObjectDeleteEvent = "OBJECT_DELETE"
// Event that occurs when the live version of an object becomes an
// archived version.
ObjectArchiveEvent = "OBJECT_ARCHIVE"
)
func toNotification(rn *raw.Notification) *Notification {
n := &Notification{
ID: rn.Id,
EventTypes: rn.EventTypes,
ObjectNamePrefix: rn.ObjectNamePrefix,
CustomAttributes: rn.CustomAttributes,
PayloadFormat: rn.PayloadFormat,
}
n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic)
return n
}
var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)")
// parseNotificationTopic extracts the project and topic IDs from the full
// resource name returned by the service. If the name is malformed, it returns
// "?" for both IDs.
func parseNotificationTopic(nt string) (projectID, topicID string) {
matches := topicRE.FindStringSubmatch(nt)
if matches == nil {
return "?", "?"
}
return matches[1], matches[2]
}
func toRawNotification(n *Notification) *raw.Notification {
return &raw.Notification{
Id: n.ID,
Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
n.TopicProjectID, n.TopicID),
EventTypes: n.EventTypes,
ObjectNamePrefix: n.ObjectNamePrefix,
CustomAttributes: n.CustomAttributes,
PayloadFormat: string(n.PayloadFormat),
}
}
// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
// and PayloadFormat, and must not set its ID. The other fields are all optional. The
// returned Notification's ID can be used to refer to it.
func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification")
defer func() { trace.EndSpan(ctx, err) }()
if n.ID != "" {
return nil, errors.New("storage: AddNotification: ID must not be set")
}
if n.TopicProjectID == "" {
return nil, errors.New("storage: AddNotification: missing TopicProjectID")
}
if n.TopicID == "" {
return nil, errors.New("storage: AddNotification: missing TopicID")
}
call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n))
setClientHeader(call.Header())
if b.userProject != "" {
call.UserProject(b.userProject)
}
rn, err := call.Context(ctx).Do()
if err != nil {
return nil, err
}
return toNotification(rn), nil
}
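// [Editorial sketch, not part of this diff] Registering a Cloud Pub/Sub notification for
// object-finalize events, following the rules in the doc comment above (TopicProjectID,
// TopicID and PayloadFormat set, ID left empty). Project and topic names are placeholders,
// and the topic must already exist.
func addFinalizeNotification(ctx context.Context, bkt *storage.BucketHandle) (*storage.Notification, error) {
    return bkt.AddNotification(ctx, &storage.Notification{
        TopicProjectID: "example-project",
        TopicID:        "example-topic",
        PayloadFormat:  storage.JSONPayload,
        EventTypes:     []string{storage.ObjectFinalizeEvent},
    })
}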
// Notifications returns all the Notifications configured for this bucket, as a map
// indexed by notification ID.
func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
defer func() { trace.EndSpan(ctx, err) }()
call := b.c.raw.Notifications.List(b.name)
setClientHeader(call.Header())
if b.userProject != "" {
call.UserProject(b.userProject)
}
var res *raw.Notifications
err = runWithRetry(ctx, func() error {
res, err = call.Context(ctx).Do()
return err
})
if err != nil {
return nil, err
}
return notificationsToMap(res.Items), nil
}
func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
m := map[string]*Notification{}
for _, rn := range rns {
m[rn.Id] = toNotification(rn)
}
return m
}
// DeleteNotification deletes the notification with the given ID.
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification")
defer func() { trace.EndSpan(ctx, err) }()
call := b.c.raw.Notifications.Delete(b.name, id)
setClientHeader(call.Header())
if b.userProject != "" {
call.UserProject(b.userProject)
}
return call.Context(ctx).Do()
}
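// [Editorial sketch, not part of this diff] Listing a bucket's notifications and deleting
// those that publish to a given topic, combining Notifications and DeleteNotification above.
func deleteNotificationsForTopic(ctx context.Context, bkt *storage.BucketHandle, topicID string) error {
    ns, err := bkt.Notifications(ctx)
    if err != nil {
        return err
    }
    for id, n := range ns {
        if n.TopicID == topicID {
            if err := bkt.DeleteNotification(ctx, id); err != nil {
                return err
            }
        }
    }
    return nil
}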

View File

@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,23 +15,210 @@
package storage
import (
"errors"
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
)
var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
// NewReader creates a new Reader to read the contents of the
// object.
// ErrObjectNotExist will be returned if the object is not found.
//
// The caller must call Close on the returned Reader when done reading.
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
return o.NewRangeReader(ctx, 0, -1)
}
// NewRangeReader reads part of an object, reading at most length bytes
// starting at the given offset. If length is negative, the object is read
// until the end.
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader")
defer func() { trace.EndSpan(ctx, err) }()
if err := o.validate(); err != nil {
return nil, err
}
if offset < 0 {
return nil, fmt.Errorf("storage: invalid offset %d < 0", offset)
}
if o.conds != nil {
if err := o.conds.validate("NewRangeReader"); err != nil {
return nil, err
}
}
u := &url.URL{
Scheme: "https",
Host: "storage.googleapis.com",
Path: fmt.Sprintf("/%s/%s", o.bucket, o.object),
RawQuery: conditionsQuery(o.gen, o.conds),
}
verb := "GET"
if length == 0 {
verb = "HEAD"
}
req, err := http.NewRequest(verb, u.String(), nil)
if err != nil {
return nil, err
}
req = withContext(req, ctx)
if o.userProject != "" {
req.Header.Set("X-Goog-User-Project", o.userProject)
}
if o.readCompressed {
req.Header.Set("Accept-Encoding", "gzip")
}
if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
return nil, err
}
// Define a function that initiates a Read with offset and length, assuming we
// have already read seen bytes.
reopen := func(seen int64) (*http.Response, error) {
start := offset + seen
if length < 0 && start > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start))
} else if length > 0 {
// The end character isn't affected by how many bytes we've seen.
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1))
}
var res *http.Response
err = runWithRetry(ctx, func() error {
res, err = o.c.hc.Do(req)
if err != nil {
return err
}
if res.StatusCode == http.StatusNotFound {
res.Body.Close()
return ErrObjectNotExist
}
if res.StatusCode < 200 || res.StatusCode > 299 {
body, _ := ioutil.ReadAll(res.Body)
res.Body.Close()
return &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
Body: string(body),
}
}
if start > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
res.Body.Close()
return errors.New("storage: partial request not satisfied")
}
return nil
})
if err != nil {
return nil, err
}
return res, nil
}
res, err := reopen(0)
if err != nil {
return nil, err
}
var (
size int64 // total size of object, even if a range was requested.
checkCRC bool
crc uint32
)
if res.StatusCode == http.StatusPartialContent {
cr := strings.TrimSpace(res.Header.Get("Content-Range"))
if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
if err != nil {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
} else {
size = res.ContentLength
// Check the CRC iff all of the following hold:
// - We asked for content (length != 0).
// - We got all the content (status != PartialContent).
// - The server sent a CRC header.
// - The Go http stack did not uncompress the file.
// - We were not served compressed data that was uncompressed on download.
// The problem with the last two cases is that the CRC will not match -- GCS
// computes it on the compressed contents, but we compute it on the
// uncompressed contents.
if length != 0 && !goHTTPUncompressed(res) && !uncompressedByServer(res) {
crc, checkCRC = parseCRC32c(res)
}
}
remain := res.ContentLength
body := res.Body
if length == 0 {
remain = 0
body.Close()
body = emptyBody
}
return &Reader{
body: body,
size: size,
remain: remain,
contentType: res.Header.Get("Content-Type"),
contentEncoding: res.Header.Get("Content-Encoding"),
cacheControl: res.Header.Get("Cache-Control"),
wantCRC: crc,
checkCRC: checkCRC,
reopen: reopen,
}, nil
}
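// [Editorial sketch, not part of this diff] Reading the first 1 KiB of an object with
// NewRangeReader. Assumes "io/ioutil" is imported; bucket and object names are placeholders.
func readPrefix(ctx context.Context, client *storage.Client) ([]byte, error) {
    r, err := client.Bucket("example-bucket").Object("data").NewRangeReader(ctx, 0, 1024)
    if err != nil {
        return nil, err
    }
    defer r.Close()
    return ioutil.ReadAll(r)
}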
func uncompressedByServer(res *http.Response) bool {
// If the data is stored as gzip but is not encoded as gzip, then it
// was uncompressed by the server.
return res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" &&
res.Header.Get("Content-Encoding") != "gzip"
}
func parseCRC32c(res *http.Response) (uint32, bool) {
const prefix = "crc32c="
for _, spec := range res.Header["X-Goog-Hash"] {
if strings.HasPrefix(spec, prefix) {
c, err := decodeUint32(spec[len(prefix):])
if err == nil {
return c, true
}
}
}
return 0, false
}
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// Reader reads a Cloud Storage object.
// It implements io.Reader.
//
// Typically, a Reader computes the CRC of the downloaded content and compares it to
// the stored CRC, returning an error from Read if there is a mismatch. This integrity check
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
type Reader struct {
body io.ReadCloser
remain, size int64
contentType string
cacheControl string
checkCRC bool // should we check the CRC?
wantCRC uint32 // the CRC32c value the server sent in the header
gotCRC uint32 // running crc
body io.ReadCloser
seen, remain, size int64
contentType string
contentEncoding string
cacheControl string
checkCRC bool // should we check the CRC?
wantCRC uint32 // the CRC32c value the server sent in the header
gotCRC uint32 // running crc
reopen func(seen int64) (*http.Response, error)
}
// Close closes the Reader. It must be called when done reading.
@ -40,7 +227,7 @@ func (r *Reader) Close() error {
}
func (r *Reader) Read(p []byte) (int, error) {
n, err := r.body.Read(p)
n, err := r.readWithRetry(p)
if r.remain != -1 {
r.remain -= int64(n)
}
@ -49,14 +236,45 @@ func (r *Reader) Read(p []byte) (int, error) {
// Check CRC here. It would be natural to check it in Close, but
// everybody defers Close on the assumption that it doesn't return
// anything worth looking at.
if r.remain == 0 && r.gotCRC != r.wantCRC {
return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
r.gotCRC, r.wantCRC)
if err == io.EOF {
if r.gotCRC != r.wantCRC {
return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
r.gotCRC, r.wantCRC)
}
}
}
return n, err
}
func (r *Reader) readWithRetry(p []byte) (int, error) {
n := 0
for len(p[n:]) > 0 {
m, err := r.body.Read(p[n:])
n += m
r.seen += int64(m)
if !shouldRetryRead(err) {
return n, err
}
// Read failed, but we will try again. Send a ranged read request that takes
// into account the number of bytes we've already seen.
res, err := r.reopen(r.seen)
if err != nil {
// reopen already retries
return n, err
}
r.body.Close()
r.body = res.Body
}
return n, nil
}
func shouldRetryRead(err error) bool {
if err == nil {
return false
}
return strings.HasSuffix(err.Error(), "INTERNAL_ERROR") && strings.Contains(reflect.TypeOf(err).String(), "http2")
}
// Size returns the size of the object in bytes.
// The returned value is always the same and is not affected by
// calls to Read or Close.
@ -74,6 +292,11 @@ func (r *Reader) ContentType() string {
return r.contentType
}
// ContentEncoding returns the content encoding of the object.
func (r *Reader) ContentEncoding() string {
return r.contentEncoding
}
// CacheControl returns the cache control of the object.
func (r *Reader) CacheControl() string {
return r.cacheControl

View File

@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved.
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -26,15 +26,17 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
"unicode/utf8"
"cloud.google.com/go/internal/trace"
"google.golang.org/api/option"
htransport "google.golang.org/api/transport/http"
@ -170,7 +172,7 @@ type SignedURLOptions struct {
// Optional.
ContentType string
// Headers is a list of extention headers the client must provide
// Headers is a list of extension headers the client must provide
// in order to use the generated signed URL.
// Optional.
Headers []string
@ -182,6 +184,60 @@ type SignedURLOptions struct {
MD5 string
}
var (
canonicalHeaderRegexp = regexp.MustCompile(`(?i)^(x-goog-[^:]+):(.*)?$`)
excludedCanonicalHeaders = map[string]bool{
"x-goog-encryption-key": true,
"x-goog-encryption-key-sha256": true,
}
)
// sanitizeHeaders applies the specifications for canonical extension headers at
// https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers.
func sanitizeHeaders(hdrs []string) []string {
headerMap := map[string][]string{}
for _, hdr := range hdrs {
// No leading or trailing whitespaces.
sanitizedHeader := strings.TrimSpace(hdr)
// Only keep canonical headers, discard any others.
headerMatches := canonicalHeaderRegexp.FindStringSubmatch(sanitizedHeader)
if len(headerMatches) == 0 {
continue
}
header := strings.ToLower(strings.TrimSpace(headerMatches[1]))
if excludedCanonicalHeaders[headerMatches[1]] {
// Do not keep any deliberately excluded canonical headers when signing.
continue
}
value := strings.TrimSpace(headerMatches[2])
if len(value) > 0 {
// Remove duplicate headers by appending the values of duplicates
// in their order of appearance.
headerMap[header] = append(headerMap[header], value)
}
}
var sanitizedHeaders []string
for header, values := range headerMap {
// There should be no spaces around the colon separating the
// header name from the header value or around the values
// themselves. The values should be separated by commas.
// NOTE: The semantics for headers without a value are not clear.
// However, per the specification these should be edge cases
// anyway and we should assume that there will be no
// canonical headers using empty values. Any such headers
// are discarded at the regexp stage above.
sanitizedHeaders = append(
sanitizedHeaders,
fmt.Sprintf("%s:%s", header, strings.Join(values, ",")),
)
}
sort.Strings(sanitizedHeaders)
return sanitizedHeaders
}
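// [Editorial sketch, not part of this diff] Producing a signed GET URL with SignedURL
// (documented below); the extension header supplied here passes through sanitizeHeaders
// above. The service account email, private key, bucket and object names are placeholders.
func signedGetURL(serviceAccountEmail string, privateKey []byte) (string, error) {
    return storage.SignedURL("example-bucket", "data", &storage.SignedURLOptions{
        GoogleAccessID: serviceAccountEmail,
        PrivateKey:     privateKey,
        Method:         "GET",
        Expires:        time.Now().Add(15 * time.Minute),
        Headers:        []string{"x-goog-acl:private"},
    })
}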
// SignedURL returns a URL for the specified object. Signed URLs allow
// the users access to a restricted resource for a limited time without having a
// Google account or signing in. For more information about the signed
@ -208,6 +264,7 @@ func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {
return "", errors.New("storage: invalid MD5 checksum")
}
}
opts.Headers = sanitizeHeaders(opts.Headers)
signBytes := opts.SignBytes
if opts.PrivateKey != nil {
@ -258,14 +315,15 @@ func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {
// ObjectHandle provides operations on an object in a Google Cloud Storage bucket.
// Use BucketHandle.Object to get a handle.
type ObjectHandle struct {
c *Client
bucket string
object string
acl ACLHandle
gen int64 // a negative value indicates latest
conds *Conditions
encryptionKey []byte // AES-256 key
userProject string // for requester-pays buckets
c *Client
bucket string
object string
acl ACLHandle
gen int64 // a negative value indicates latest
conds *Conditions
encryptionKey []byte // AES-256 key
userProject string // for requester-pays buckets
readCompressed bool // Accept-Encoding: gzip
}
// ACL provides access to the object's access control list.
@ -288,7 +346,7 @@ func (o *ObjectHandle) Generation(gen int64) *ObjectHandle {
// If returns a new ObjectHandle that applies a set of preconditions.
// Preconditions already set on the ObjectHandle are ignored.
// Operations on the new handle will only occur if the preconditions are
// Operations on the new handle will return an error if the preconditions are not
// satisfied. See https://cloud.google.com/storage/docs/generations-preconditions
// for more details.
func (o *ObjectHandle) If(conds Conditions) *ObjectHandle {
@ -310,7 +368,10 @@ func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle {
// Attrs returns meta information about the object.
// ErrObjectNotExist will be returned if the object is not found.
func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) {
func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Attrs")
defer func() { trace.EndSpan(ctx, err) }()
if err := o.validate(); err != nil {
return nil, err
}
@ -325,7 +386,6 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) {
return nil, err
}
var obj *raw.Object
var err error
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
@ -340,7 +400,10 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) {
// Update updates an object with the provided attributes.
// All zero-value attributes are ignored.
// ErrObjectNotExist will be returned if the object is not found.
func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (*ObjectAttrs, error) {
func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (oa *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update")
defer func() { trace.EndSpan(ctx, err) }()
if err := o.validate(); err != nil {
return nil, err
}
@ -404,11 +467,13 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
if o.userProject != "" {
call.UserProject(o.userProject)
}
if uattrs.PredefinedACL != "" {
call.PredefinedAcl(uattrs.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
return nil, err
}
var obj *raw.Object
var err error
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
@ -439,6 +504,10 @@ type ObjectAttrsToUpdate struct {
CacheControl optional.String
Metadata map[string]string // set to map[string]string{} to delete
ACL []ACLRule
// If not empty, applies a predefined set of access controls. ACL must be nil.
// See https://cloud.google.com/storage/docs/json_api/v1/objects/patch.
PredefinedACL string
}
// Delete deletes the single specified object.
@ -467,140 +536,13 @@ func (o *ObjectHandle) Delete(ctx context.Context) error {
return err
}
// NewReader creates a new Reader to read the contents of the
// object.
// ErrObjectNotExist will be returned if the object is not found.
//
// The caller must call Close on the returned Reader when done reading.
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
return o.NewRangeReader(ctx, 0, -1)
// ReadCompressed when true causes the read to happen without decompressing.
func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle {
o2 := *o
o2.readCompressed = compressed
return &o2
}
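// [Editorial sketch, not part of this diff] Using ReadCompressed to download a gzip-encoded
// object without transcoding, so the bytes received match what is stored and the Reader's
// CRC check remains meaningful. Assumes "io/ioutil" is imported.
func readGzipped(ctx context.Context, obj *storage.ObjectHandle) ([]byte, error) {
    r, err := obj.ReadCompressed(true).NewReader(ctx)
    if err != nil {
        return nil, err
    }
    defer r.Close()
    return ioutil.ReadAll(r)
}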
// NewRangeReader reads part of an object, reading at most length bytes
// starting at the given offset. If length is negative, the object is read
// until the end.
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (*Reader, error) {
if err := o.validate(); err != nil {
return nil, err
}
if offset < 0 {
return nil, fmt.Errorf("storage: invalid offset %d < 0", offset)
}
if o.conds != nil {
if err := o.conds.validate("NewRangeReader"); err != nil {
return nil, err
}
}
u := &url.URL{
Scheme: "https",
Host: "storage.googleapis.com",
Path: fmt.Sprintf("/%s/%s", o.bucket, o.object),
RawQuery: conditionsQuery(o.gen, o.conds),
}
verb := "GET"
if length == 0 {
verb = "HEAD"
}
req, err := http.NewRequest(verb, u.String(), nil)
if err != nil {
return nil, err
}
req = withContext(req, ctx)
if length < 0 && offset > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
} else if length > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
}
if o.userProject != "" {
req.Header.Set("X-Goog-User-Project", o.userProject)
}
if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
return nil, err
}
var res *http.Response
err = runWithRetry(ctx, func() error {
res, err = o.c.hc.Do(req)
if err != nil {
return err
}
if res.StatusCode == http.StatusNotFound {
res.Body.Close()
return ErrObjectNotExist
}
if res.StatusCode < 200 || res.StatusCode > 299 {
body, _ := ioutil.ReadAll(res.Body)
res.Body.Close()
return &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
Body: string(body),
}
}
if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
res.Body.Close()
return errors.New("storage: partial request not satisfied")
}
return nil
})
if err != nil {
return nil, err
}
var size int64 // total size of object, even if a range was requested.
if res.StatusCode == http.StatusPartialContent {
cr := strings.TrimSpace(res.Header.Get("Content-Range"))
if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
if err != nil {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
} else {
size = res.ContentLength
}
remain := res.ContentLength
body := res.Body
if length == 0 {
remain = 0
body.Close()
body = emptyBody
}
var (
checkCRC bool
crc uint32
)
// Even if there is a CRC header, we can't compute the hash on partial data.
if remain == size {
crc, checkCRC = parseCRC32c(res)
}
return &Reader{
body: body,
size: size,
remain: remain,
contentType: res.Header.Get("Content-Type"),
cacheControl: res.Header.Get("Cache-Control"),
wantCRC: crc,
checkCRC: checkCRC,
}, nil
}
func parseCRC32c(res *http.Response) (uint32, bool) {
const prefix = "crc32c="
for _, spec := range res.Header["X-Goog-Hash"] {
if strings.HasPrefix(spec, prefix) {
c, err := decodeUint32(spec[len(prefix):])
if err == nil {
return c, true
}
}
}
return 0, false
}
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// NewWriter returns a storage Writer that writes to the GCS object
// associated with this ObjectHandle.
//
@ -638,11 +580,10 @@ func (o *ObjectHandle) validate() error {
return nil
}
// parseKey converts the binary contents of a private key file
// to an *rsa.PrivateKey. It detects whether the private key is in a
// PEM container or not. If so, it extracts the the private key
// from PEM container before conversion. It only supports PEM
// containers with no passphrase.
// parseKey converts the binary contents of a private key file to an
// *rsa.PrivateKey. It detects whether the private key is in a PEM container or
// not. If so, it extracts the private key from PEM container before
// conversion. It only supports PEM containers with no passphrase.
func parseKey(key []byte) (*rsa.PrivateKey, error) {
if block, _ := pem.Decode(key); block != nil {
key = block.Bytes
@ -661,23 +602,8 @@ func parseKey(key []byte) (*rsa.PrivateKey, error) {
return parsed, nil
}
func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl {
var acl []*raw.ObjectAccessControl
if len(oldACL) > 0 {
acl = make([]*raw.ObjectAccessControl, len(oldACL))
for i, rule := range oldACL {
acl[i] = &raw.ObjectAccessControl{
Entity: string(rule.Entity),
Role: string(rule.Role),
}
}
}
return acl
}
// toRawObject copies the editable attributes from o to the raw library's Object type.
func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
acl := toRawObjectACL(o.ACL)
return &raw.Object{
Bucket: bucket,
Name: o.Name,
@ -687,7 +613,7 @@ func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
CacheControl: o.CacheControl,
ContentDisposition: o.ContentDisposition,
StorageClass: o.StorageClass,
Acl: acl,
Acl: toRawObjectACL(o.ACL),
Metadata: o.Metadata,
}
}
@ -715,6 +641,14 @@ type ObjectAttrs struct {
// ACL is the list of access control rules for the object.
ACL []ACLRule
// If not empty, applies a predefined set of access controls. It should be set
// only when writing, copying or composing an object. When copying or composing,
// it acts as the destinationPredefinedAcl parameter.
// PredefinedACL is always empty for ObjectAttrs returned from the service.
// See https://cloud.google.com/storage/docs/json_api/v1/objects/insert
// for valid values.
PredefinedACL string
// Owner is the owner of the object. This field is read-only.
//
// If non-zero, it is in the form of "user-<userId>".
@ -788,6 +722,14 @@ type ObjectAttrs struct {
// encryption in Google Cloud Storage.
CustomerKeySHA256 string
// Cloud KMS key name, in the form
// projects/P/locations/L/keyRings/R/cryptoKeys/K, used to encrypt this object,
// if the object is encrypted by such a key.
//
// Providing both a KMSKeyName and a customer-supplied encryption key (via
// ObjectHandle.Key) will result in an error when writing an object.
KMSKeyName string
// Prefix is set only for ObjectAttrs which represent synthetic "directory
// entries" when iterating over buckets using Query.Delimiter. See
// ObjectIterator.Next. When set, no other fields in ObjectAttrs will be
@ -809,13 +751,6 @@ func newObject(o *raw.Object) *ObjectAttrs {
if o == nil {
return nil
}
acl := make([]ACLRule, len(o.Acl))
for i, rule := range o.Acl {
acl[i] = ACLRule{
Entity: ACLEntity(rule.Entity),
Role: ACLRole(rule.Role),
}
}
owner := ""
if o.Owner != nil {
owner = o.Owner.Entity
@ -832,7 +767,7 @@ func newObject(o *raw.Object) *ObjectAttrs {
ContentType: o.ContentType,
ContentLanguage: o.ContentLanguage,
CacheControl: o.CacheControl,
ACL: acl,
ACL: toObjectACLRules(o.Acl),
Owner: owner,
ContentEncoding: o.ContentEncoding,
ContentDisposition: o.ContentDisposition,
@ -845,6 +780,7 @@ func newObject(o *raw.Object) *ObjectAttrs {
Metageneration: o.Metageneration,
StorageClass: o.StorageClass,
CustomerKeySHA256: sha256,
KMSKeyName: o.KmsKeyName,
Created: convertTime(o.TimeCreated),
Deleted: convertTime(o.TimeDeleted),
Updated: convertTime(o.Updated),

48071
vendor/cloud.google.com/go/storage/storage.replay generated vendored Normal file

File diff suppressed because one or more lines are too long

View File

@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved.
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -19,6 +19,7 @@ import (
"errors"
"fmt"
"io"
"sync"
"unicode/utf8"
"golang.org/x/net/context"
@ -47,8 +48,11 @@ type Writer struct {
// to the nearest multiple of 256K. If zero, chunking will be disabled and
// the object will be uploaded in a single request.
//
// ChunkSize will default to a reasonable value. Any custom configuration
// must be done before the first Write call.
// ChunkSize will default to a reasonable value. If you perform many concurrent
// writes of small objects, you may wish to set ChunkSize to a value that matches
// your objects' sizes to avoid consuming large amounts of memory.
//
// ChunkSize must be set before the first Write call.
ChunkSize int
// ProgressFunc can be used to monitor the progress of a large write.
@ -68,8 +72,10 @@ type Writer struct {
pw *io.PipeWriter
donec chan struct{} // closed after err and obj are set.
err error
obj *ObjectAttrs
mu sync.Mutex
err error
}
func (w *Writer) open() error {
@ -82,6 +88,9 @@ func (w *Writer) open() error {
if !utf8.ValidString(attrs.Name) {
return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
}
if attrs.KMSKeyName != "" && w.o.encryptionKey != nil {
return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key")
}
pr, pw := io.Pipe()
w.pw = pw
w.opened = true
@ -113,9 +122,17 @@ func (w *Writer) open() error {
if w.ProgressFunc != nil {
call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
}
if attrs.KMSKeyName != "" {
call.KmsKeyName(attrs.KMSKeyName)
}
if attrs.PredefinedACL != "" {
call.PredefinedAcl(attrs.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
w.mu.Lock()
w.err = err
pr.CloseWithError(w.err)
w.mu.Unlock()
pr.CloseWithError(err)
return
}
var resp *raw.Object
@ -142,8 +159,10 @@ func (w *Writer) open() error {
}
}
if err != nil {
w.mu.Lock()
w.err = err
pr.CloseWithError(w.err)
w.mu.Unlock()
pr.CloseWithError(err)
return
}
w.obj = newObject(resp)
@ -158,8 +177,11 @@ func (w *Writer) open() error {
// use the error returned from Writer.Close to determine if
// the upload was successful.
func (w *Writer) Write(p []byte) (n int, err error) {
if w.err != nil {
return 0, w.err
w.mu.Lock()
werr := w.err
w.mu.Unlock()
if werr != nil {
return 0, werr
}
if !w.opened {
if err := w.open(); err != nil {
@ -182,11 +204,15 @@ func (w *Writer) Close() error {
return err
}
<-w.donec
w.mu.Lock()
defer w.mu.Unlock()
return w.err
}
// CloseWithError aborts the write operation with the provided error.
// CloseWithError always returns nil.
//
// Deprecated: cancel the context passed to NewWriter instead.
func (w *Writer) CloseWithError(err error) error {
if !w.opened {
return nil
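Taken together, the writer changes above add KMS support, guard w.err with a mutex, and deprecate CloseWithError in favor of cancelling the context passed to NewWriter. A usage sketch (not from the diff; bucket, object, and key names are placeholders) that exercises those fields as documented:

```go
package main

import (
	"context"
	"io"
	"log"
	"strings"

	"cloud.google.com/go/storage"
)

func main() {
	// Cancelling this context is the documented replacement for the
	// deprecated CloseWithError.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	w := client.Bucket("my-bucket").Object("my-object").NewWriter(ctx)
	w.ChunkSize = 8 << 20 // must be set before the first Write
	// Placeholder key name; mutually exclusive with a customer-supplied
	// encryption key, as enforced in open() above.
	w.KMSKeyName = "projects/p/locations/l/keyRings/r/cryptoKeys/k"
	w.ProgressFunc = func(n int64) { log.Printf("uploaded %d bytes", n) }

	if _, err := io.Copy(w, strings.NewReader("hello, world")); err != nil {
		cancel() // abort the upload
		log.Fatal(err)
	}
	// Close, not Write, reports whether the upload actually succeeded.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}
```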


@ -0,0 +1,246 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/rpc/code.proto
package code // import "google.golang.org/genproto/googleapis/rpc/code"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// The canonical error codes for Google APIs.
//
//
// Sometimes multiple error codes may apply. Services should return
// the most specific error code that applies. For example, prefer
// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply.
// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`.
type Code int32
const (
// Not an error; returned on success
//
// HTTP Mapping: 200 OK
Code_OK Code = 0
// The operation was cancelled, typically by the caller.
//
// HTTP Mapping: 499 Client Closed Request
Code_CANCELLED Code = 1
// Unknown error. For example, this error may be returned when
// a `Status` value received from another address space belongs to
// an error space that is not known in this address space. Also
// errors raised by APIs that do not return enough error information
// may be converted to this error.
//
// HTTP Mapping: 500 Internal Server Error
Code_UNKNOWN Code = 2
// The client specified an invalid argument. Note that this differs
// from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments
// that are problematic regardless of the state of the system
// (e.g., a malformed file name).
//
// HTTP Mapping: 400 Bad Request
Code_INVALID_ARGUMENT Code = 3
// The deadline expired before the operation could complete. For operations
// that change the state of the system, this error may be returned
// even if the operation has completed successfully. For example, a
// successful response from a server could have been delayed long
// enough for the deadline to expire.
//
// HTTP Mapping: 504 Gateway Timeout
Code_DEADLINE_EXCEEDED Code = 4
// Some requested entity (e.g., file or directory) was not found.
//
// Note to server developers: if a request is denied for an entire class
// of users, such as gradual feature rollout or undocumented whitelist,
// `NOT_FOUND` may be used. If a request is denied for some users within
// a class of users, such as user-based access control, `PERMISSION_DENIED`
// must be used.
//
// HTTP Mapping: 404 Not Found
Code_NOT_FOUND Code = 5
// The entity that a client attempted to create (e.g., file or directory)
// already exists.
//
// HTTP Mapping: 409 Conflict
Code_ALREADY_EXISTS Code = 6
// The caller does not have permission to execute the specified
// operation. `PERMISSION_DENIED` must not be used for rejections
// caused by exhausting some resource (use `RESOURCE_EXHAUSTED`
// instead for those errors). `PERMISSION_DENIED` must not be
// used if the caller can not be identified (use `UNAUTHENTICATED`
// instead for those errors). This error code does not imply the
// request is valid or the requested entity exists or satisfies
// other pre-conditions.
//
// HTTP Mapping: 403 Forbidden
Code_PERMISSION_DENIED Code = 7
// The request does not have valid authentication credentials for the
// operation.
//
// HTTP Mapping: 401 Unauthorized
Code_UNAUTHENTICATED Code = 16
// Some resource has been exhausted, perhaps a per-user quota, or
// perhaps the entire file system is out of space.
//
// HTTP Mapping: 429 Too Many Requests
Code_RESOURCE_EXHAUSTED Code = 8
// The operation was rejected because the system is not in a state
// required for the operation's execution. For example, the directory
// to be deleted is non-empty, an rmdir operation is applied to
// a non-directory, etc.
//
// Service implementors can use the following guidelines to decide
// between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`:
// (a) Use `UNAVAILABLE` if the client can retry just the failing call.
// (b) Use `ABORTED` if the client should retry at a higher level
// (e.g., when a client-specified test-and-set fails, indicating the
// client should restart a read-modify-write sequence).
// (c) Use `FAILED_PRECONDITION` if the client should not retry until
// the system state has been explicitly fixed. E.g., if an "rmdir"
// fails because the directory is non-empty, `FAILED_PRECONDITION`
// should be returned since the client should not retry unless
// the files are deleted from the directory.
//
// HTTP Mapping: 400 Bad Request
Code_FAILED_PRECONDITION Code = 9
// The operation was aborted, typically due to a concurrency issue such as
// a sequencer check failure or transaction abort.
//
// See the guidelines above for deciding between `FAILED_PRECONDITION`,
// `ABORTED`, and `UNAVAILABLE`.
//
// HTTP Mapping: 409 Conflict
Code_ABORTED Code = 10
// The operation was attempted past the valid range. E.g., seeking or
// reading past end-of-file.
//
// Unlike `INVALID_ARGUMENT`, this error indicates a problem that may
// be fixed if the system state changes. For example, a 32-bit file
// system will generate `INVALID_ARGUMENT` if asked to read at an
// offset that is not in the range [0,2^32-1], but it will generate
// `OUT_OF_RANGE` if asked to read from an offset past the current
// file size.
//
// There is a fair bit of overlap between `FAILED_PRECONDITION` and
// `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific
// error) when it applies so that callers who are iterating through
// a space can easily look for an `OUT_OF_RANGE` error to detect when
// they are done.
//
// HTTP Mapping: 400 Bad Request
Code_OUT_OF_RANGE Code = 11
// The operation is not implemented or is not supported/enabled in this
// service.
//
// HTTP Mapping: 501 Not Implemented
Code_UNIMPLEMENTED Code = 12
// Internal errors. This means that some invariants expected by the
// underlying system have been broken. This error code is reserved
// for serious errors.
//
// HTTP Mapping: 500 Internal Server Error
Code_INTERNAL Code = 13
// The service is currently unavailable. This is most likely a
// transient condition, which can be corrected by retrying with
// a backoff.
//
// See the guidelines above for deciding between `FAILED_PRECONDITION`,
// `ABORTED`, and `UNAVAILABLE`.
//
// HTTP Mapping: 503 Service Unavailable
Code_UNAVAILABLE Code = 14
// Unrecoverable data loss or corruption.
//
// HTTP Mapping: 500 Internal Server Error
Code_DATA_LOSS Code = 15
)
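The comments above document an HTTP mapping for every canonical code. As an editorial illustration (not part of the generated file; the mappings are copied straight from those comments), a small helper makes the table concrete:

```go
package main

import (
	"fmt"
	"net/http"

	codepb "google.golang.org/genproto/googleapis/rpc/code"
)

// httpStatus returns the HTTP status documented for each canonical code.
func httpStatus(c codepb.Code) int {
	switch c {
	case codepb.Code_OK:
		return http.StatusOK
	case codepb.Code_CANCELLED:
		return 499 // Client Closed Request (no net/http constant)
	case codepb.Code_INVALID_ARGUMENT, codepb.Code_FAILED_PRECONDITION, codepb.Code_OUT_OF_RANGE:
		return http.StatusBadRequest
	case codepb.Code_DEADLINE_EXCEEDED:
		return http.StatusGatewayTimeout
	case codepb.Code_NOT_FOUND:
		return http.StatusNotFound
	case codepb.Code_ALREADY_EXISTS, codepb.Code_ABORTED:
		return http.StatusConflict
	case codepb.Code_PERMISSION_DENIED:
		return http.StatusForbidden
	case codepb.Code_UNAUTHENTICATED:
		return http.StatusUnauthorized
	case codepb.Code_RESOURCE_EXHAUSTED:
		return http.StatusTooManyRequests
	case codepb.Code_UNIMPLEMENTED:
		return http.StatusNotImplemented
	case codepb.Code_UNAVAILABLE:
		return http.StatusServiceUnavailable
	default: // UNKNOWN, INTERNAL, DATA_LOSS
		return http.StatusInternalServerError
	}
}

func main() {
	fmt.Println(codepb.Code_NOT_FOUND, "->", httpStatus(codepb.Code_NOT_FOUND)) // NOT_FOUND -> 404
}
```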
var Code_name = map[int32]string{
0: "OK",
1: "CANCELLED",
2: "UNKNOWN",
3: "INVALID_ARGUMENT",
4: "DEADLINE_EXCEEDED",
5: "NOT_FOUND",
6: "ALREADY_EXISTS",
7: "PERMISSION_DENIED",
16: "UNAUTHENTICATED",
8: "RESOURCE_EXHAUSTED",
9: "FAILED_PRECONDITION",
10: "ABORTED",
11: "OUT_OF_RANGE",
12: "UNIMPLEMENTED",
13: "INTERNAL",
14: "UNAVAILABLE",
15: "DATA_LOSS",
}
var Code_value = map[string]int32{
"OK": 0,
"CANCELLED": 1,
"UNKNOWN": 2,
"INVALID_ARGUMENT": 3,
"DEADLINE_EXCEEDED": 4,
"NOT_FOUND": 5,
"ALREADY_EXISTS": 6,
"PERMISSION_DENIED": 7,
"UNAUTHENTICATED": 16,
"RESOURCE_EXHAUSTED": 8,
"FAILED_PRECONDITION": 9,
"ABORTED": 10,
"OUT_OF_RANGE": 11,
"UNIMPLEMENTED": 12,
"INTERNAL": 13,
"UNAVAILABLE": 14,
"DATA_LOSS": 15,
}
func (x Code) String() string {
return proto.EnumName(Code_name, int32(x))
}
func (Code) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_code_932ba152e0df0902, []int{0}
}
func init() {
proto.RegisterEnum("google.rpc.Code", Code_name, Code_value)
}
func init() { proto.RegisterFile("google/rpc/code.proto", fileDescriptor_code_932ba152e0df0902) }
var fileDescriptor_code_932ba152e0df0902 = []byte{
// 362 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x51, 0xcd, 0x6e, 0x93, 0x31,
0x10, 0xa4, 0x69, 0x49, 0x9b, 0xcd, 0xdf, 0xd6, 0xa5, 0xf0, 0x0e, 0x1c, 0x92, 0x43, 0x8f, 0x9c,
0x36, 0x9f, 0x37, 0xad, 0x55, 0x67, 0xfd, 0xc9, 0x3f, 0x25, 0x70, 0xb1, 0x4a, 0x1a, 0x7d, 0x42,
0x2a, 0x75, 0xf4, 0xc1, 0x13, 0xf1, 0x12, 0xbc, 0x1e, 0x72, 0x8b, 0xe8, 0xc5, 0x87, 0x99, 0xf1,
0xee, 0xce, 0x0c, 0x5c, 0x76, 0xa5, 0x74, 0x8f, 0xfb, 0x65, 0x7f, 0xd8, 0x2d, 0x77, 0xe5, 0x61,
0xbf, 0x38, 0xf4, 0xe5, 0x57, 0x51, 0xf0, 0x02, 0x2f, 0xfa, 0xc3, 0xee, 0xe3, 0x9f, 0x01, 0x9c,
0x34, 0xe5, 0x61, 0xaf, 0x86, 0x30, 0x70, 0xb7, 0xf8, 0x46, 0x4d, 0x61, 0xd4, 0x90, 0x34, 0x6c,
0x2d, 0x6b, 0x3c, 0x52, 0x63, 0x38, 0x4d, 0x72, 0x2b, 0xee, 0xb3, 0xe0, 0x40, 0xbd, 0x03, 0x34,
0x72, 0x47, 0xd6, 0xe8, 0x4c, 0xfe, 0x3a, 0x6d, 0x58, 0x22, 0x1e, 0xab, 0x4b, 0x38, 0xd7, 0x4c,
0xda, 0x1a, 0xe1, 0xcc, 0xdb, 0x86, 0x59, 0xb3, 0xc6, 0x93, 0x3a, 0x48, 0x5c, 0xcc, 0x6b, 0x97,
0x44, 0xe3, 0x5b, 0xa5, 0x60, 0x46, 0xd6, 0x33, 0xe9, 0x2f, 0x99, 0xb7, 0x26, 0xc4, 0x80, 0xc3,
0xfa, 0xb3, 0x65, 0xbf, 0x31, 0x21, 0x18, 0x27, 0x59, 0xb3, 0x18, 0xd6, 0x78, 0xaa, 0x2e, 0x60,
0x9e, 0x84, 0x52, 0xbc, 0x61, 0x89, 0xa6, 0xa1, 0xc8, 0x1a, 0x51, 0xbd, 0x07, 0xe5, 0x39, 0xb8,
0xe4, 0x9b, 0xba, 0xe5, 0x86, 0x52, 0xa8, 0xf8, 0x99, 0xfa, 0x00, 0x17, 0x6b, 0x32, 0x96, 0x75,
0x6e, 0x3d, 0x37, 0x4e, 0xb4, 0x89, 0xc6, 0x09, 0x8e, 0xea, 0xe5, 0xb4, 0x72, 0xbe, 0xaa, 0x40,
0x21, 0x4c, 0x5c, 0x8a, 0xd9, 0xad, 0xb3, 0x27, 0xb9, 0x66, 0x1c, 0xab, 0x73, 0x98, 0x26, 0x31,
0x9b, 0xd6, 0x72, 0xb5, 0xc1, 0x1a, 0x27, 0x6a, 0x02, 0x67, 0x46, 0x22, 0x7b, 0x21, 0x8b, 0x53,
0x35, 0x87, 0x71, 0x12, 0xba, 0x23, 0x63, 0x69, 0x65, 0x19, 0x67, 0xd5, 0x90, 0xa6, 0x48, 0xd9,
0xba, 0x10, 0x70, 0xbe, 0xda, 0xc2, 0x6c, 0x57, 0x7e, 0x2c, 0x5e, 0xb3, 0x5c, 0x8d, 0x6a, 0x90,
0x6d, 0x8d, 0xb8, 0x3d, 0xfa, 0x7a, 0xf5, 0x8f, 0xe8, 0xca, 0xe3, 0xfd, 0x53, 0xb7, 0x28, 0x7d,
0xb7, 0xec, 0xf6, 0x4f, 0xcf, 0x05, 0x2c, 0x5f, 0xa8, 0xfb, 0xc3, 0xf7, 0x9f, 0xff, 0xab, 0xf9,
0x54, 0x9f, 0xdf, 0x83, 0x63, 0xdf, 0x36, 0xdf, 0x86, 0xcf, 0xaa, 0xab, 0xbf, 0x01, 0x00, 0x00,
0xff, 0xff, 0x8e, 0x97, 0x77, 0xc2, 0xbf, 0x01, 0x00, 0x00,
}


@ -2,33 +2,36 @@ language: go
matrix:
include:
- go: 1.11.x
env: VET=1 GO111MODULE=on
- go: 1.11.x
env: RACE=1 GO111MODULE=on
- go: 1.11.x
env: RUN386=1
- go: 1.11.x
env: GRPC_GO_RETRY=on
- go: 1.10.x
env: VET=1 RACE=1
- go: 1.6.x
- go: 1.7.x
- go: 1.8.x
- go: 1.9.x
- go: 1.9.x
env: GAE=1
- go: 1.10.x
env: RUN386=1
- go: 1.10.x
env: GRPC_GO_RETRY=on
go_import_path: google.golang.org/grpc
before_install:
- if [[ -n "$RUN386" ]]; then export GOARCH=386; fi
- if [[ "$TRAVIS_EVENT_TYPE" = "cron" && -z "$RUN386" ]]; then RACE=1; fi
- if [[ "$TRAVIS_EVENT_TYPE" != "cron" ]]; then VET_SKIP_PROTO=1; fi
- if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi
- if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi
- if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi
- if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then VET_SKIP_PROTO=1; fi
install:
- if [[ "$GAE" = 1 ]]; then source ./install_gae.sh; fi
- if [[ "$VET" = 1 ]]; then ./vet.sh -install; fi
- try3() { eval "$*" || eval "$*" || eval "$*"; }
- try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi'
- if [[ "${GAE}" = 1 ]]; then source ./install_gae.sh; make testappenginedeps; fi
- if [[ "${VET}" = 1 ]]; then ./vet.sh -install; fi
script:
- set -e
- if [[ "$GAE" = 1 ]]; then make testappengine; exit 0; fi
- if [[ "$VET" = 1 ]]; then ./vet.sh; fi
- if [[ "${VET}" = 1 ]]; then ./vet.sh; fi
- if [[ "${GAE}" = 1 ]]; then make testappengine; exit 0; fi
- if [[ "${RACE}" = 1 ]]; then make testrace; exit 0; fi
- make test
- if [[ "$RACE" = 1 ]]; then make testrace; fi


@ -1,23 +1,14 @@
all: vet test testrace
deps:
go get -d -v google.golang.org/grpc/...
updatedeps:
go get -d -v -u -f google.golang.org/grpc/...
testdeps:
go get -d -v -t google.golang.org/grpc/...
testgaedeps:
goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/...
updatetestdeps:
go get -d -v -t -u -f google.golang.org/grpc/...
all: vet test testrace testappengine
build: deps
go build google.golang.org/grpc/...
clean:
go clean -i google.golang.org/grpc/...
deps:
go get -d -v google.golang.org/grpc/...
proto:
@ if ! which protoc > /dev/null; then \
echo "error: protoc not installed" >&2; \
@ -25,31 +16,45 @@ proto:
fi
go generate google.golang.org/grpc/...
vet:
./vet.sh
test: testdeps
go test -cpu 1,4 -timeout 5m google.golang.org/grpc/...
go test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
testappengine: testappenginedeps
goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
testappenginedeps:
goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/...
testdeps:
go get -d -v -t google.golang.org/grpc/...
testrace: testdeps
go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/...
testappengine: testgaedeps
goapp test -cpu 1,4 -timeout 5m google.golang.org/grpc/...
updatedeps:
go get -d -v -u -f google.golang.org/grpc/...
clean:
go clean -i google.golang.org/grpc/...
updatetestdeps:
go get -d -v -t -u -f google.golang.org/grpc/...
vet: vetdeps
./vet.sh
vetdeps:
./vet.sh -install
.PHONY: \
all \
deps \
updatedeps \
testdeps \
testgaedeps \
updatetestdeps \
build \
clean \
deps \
proto \
vet \
test \
testappengine \
testappenginedeps \
testdeps \
testrace \
clean
updatedeps \
updatetestdeps \
vet \
vetdeps


@ -16,7 +16,7 @@ $ go get -u google.golang.org/grpc
Prerequisites
-------------
This requires Go 1.6 or later. Go 1.7 will be required soon.
gRPC-Go requires Go 1.9 or later.
Constraints
-----------
@ -43,3 +43,25 @@ Please update proto package, gRPC package and rebuild the proto files:
- `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`
- `go get -u google.golang.org/grpc`
- `protoc --go_out=plugins=grpc:. *.proto`
#### How to turn on logging
The default logger is controlled by the environment variables. Turn everything
on by setting:
```
GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info
```
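These variables are read by the grpclog package at startup; the same effect can be had programmatically through grpclog.SetLoggerV2 (a sketch, assuming you control main; verbosity 99 mirrors the setting above):

```go
package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// Route info/warning output to stdout and errors to stderr, at verbosity 99.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stdout, os.Stdout, os.Stderr, 99))

	// ... dial connections and issue RPCs as usual ...
}
```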
#### The RPC failed with error `"code = Unavailable desc = transport is closing"`
This error means the connection the RPC is using was closed, and there are many
possible reasons, including:
1. mis-configured transport credentials, connection failed on handshaking
1. bytes disrupted, possibly by a proxy in between
1. server shutdown
It can be tricky to debug this because the error happens on the client side but
the root cause of the connection being closed is on the server side. Turn on
logging on __both client and server__, and see if there are any transport
errors.


@ -19,11 +19,10 @@
package grpc
import (
"fmt"
"context"
"net"
"sync"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
@ -118,26 +117,6 @@ type Balancer interface {
Close() error
}
// downErr implements net.Error. It is constructed by gRPC internals and passed to the down
// call of Balancer.
type downErr struct {
timeout bool
temporary bool
desc string
}
func (e downErr) Error() string { return e.desc }
func (e downErr) Timeout() bool { return e.timeout }
func (e downErr) Temporary() bool { return e.temporary }
func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr {
return downErr{
timeout: timeout,
temporary: temporary,
desc: fmt.Sprintf(format, a...),
}
}
// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
// the name resolution updates and updates the addresses available correspondingly.
//
@ -410,7 +389,3 @@ func (rr *roundRobin) Close() error {
type pickFirst struct {
*roundRobin
}
func pickFirstBalancerV1(r naming.Resolver) Balancer {
return &pickFirst{&roundRobin{r: r}}
}


@ -21,13 +21,15 @@
package balancer
import (
"context"
"errors"
"net"
"strings"
"golang.org/x/net/context"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
)
@ -46,6 +48,18 @@ func Register(b Builder) {
m[strings.ToLower(b.Name())] = b
}
// unregisterForTesting deletes the balancer with the given name from the
// balancer map.
//
// This function is not thread-safe.
func unregisterForTesting(name string) {
delete(m, name)
}
func init() {
internal.BalancerUnregister = unregisterForTesting
}
// Get returns the balancer builder registered with the given name.
// Note that the compare is done in a case-insensitive fashion.
// If no builder is registered with the name, nil will be returned.
@ -88,7 +102,15 @@ type SubConn interface {
}
// NewSubConnOptions contains options to create new SubConn.
type NewSubConnOptions struct{}
type NewSubConnOptions struct {
// CredsBundle is the credentials bundle that will be used in the created
// SubConn. If it's nil, the original creds from grpc DialOptions will be
// used.
CredsBundle credentials.Bundle
// HealthCheckEnabled indicates whether health check service should be
// enabled on this SubConn
HealthCheckEnabled bool
}
// ClientConn represents a gRPC ClientConn.
//
@ -125,6 +147,8 @@ type BuildOptions struct {
// use to dial to a remote load balancer server. The Balancer implementations
// can ignore this if it does not need to talk to another party securely.
DialCreds credentials.TransportCredentials
// CredsBundle is the credentials bundle that the Balancer can use.
CredsBundle credentials.Bundle
// Dialer is the custom dialer the Balancer implementation can use to dial
// to a remote load balancer server. The Balancer implementations
// can ignore this if it doesn't need to talk to remote balancer.
@ -147,12 +171,17 @@ type PickOptions struct {
// FullMethodName is the method name that NewClientStream() is called
// with. The canonical format is /service/Method.
FullMethodName string
// Header contains the metadata from the RPC's client header. The metadata
// should not be modified; make a copy first if needed.
Header metadata.MD
}
// DoneInfo contains additional information for done.
type DoneInfo struct {
// Err is the rpc error the RPC finished with. It could be nil.
Err error
// Trailer contains the metadata from the RPC's trailer, if present.
Trailer metadata.MD
// BytesSent indicates if any bytes have been sent to the server.
BytesSent bool
// BytesReceived indicates if any byte has been received from the server.


@ -19,7 +19,8 @@
package base
import (
"golang.org/x/net/context"
"context"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/grpclog"
@ -29,6 +30,7 @@ import (
type baseBuilder struct {
name string
pickerBuilder PickerBuilder
config Config
}
func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
@ -38,11 +40,12 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions)
subConns: make(map[resolver.Address]balancer.SubConn),
scStates: make(map[balancer.SubConn]connectivity.State),
csEvltr: &connectivityStateEvaluator{},
csEvltr: &balancer.ConnectivityStateEvaluator{},
// Initialize picker to a picker that always return
// ErrNoSubConnAvailable, because when state of a SubConn changes, we
// may call UpdateBalancerState with this picker.
picker: NewErrPicker(balancer.ErrNoSubConnAvailable),
config: bb.config,
}
}
@ -54,12 +57,13 @@ type baseBalancer struct {
cc balancer.ClientConn
pickerBuilder PickerBuilder
csEvltr *connectivityStateEvaluator
csEvltr *balancer.ConnectivityStateEvaluator
state connectivity.State
subConns map[resolver.Address]balancer.SubConn
scStates map[balancer.SubConn]connectivity.State
picker balancer.Picker
config Config
}
func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
@ -74,7 +78,7 @@ func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error)
addrsSet[a] = struct{}{}
if _, ok := b.subConns[a]; !ok {
// a is a new address (not existing in b.subConns).
sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck})
if err != nil {
grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
continue
@ -133,7 +137,7 @@ func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectiv
}
oldAggrState := b.state
b.state = b.csEvltr.recordTransition(oldS, s)
b.state = b.csEvltr.RecordTransition(oldS, s)
// Regenerate picker when one of the following happens:
// - this sc became ready from not-ready
@ -165,44 +169,3 @@ type errPicker struct {
func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
return nil, nil, p.err
}
// connectivityStateEvaluator gets updated by addrConns when their
// states transition, based on which it evaluates the state of
// ClientConn.
type connectivityStateEvaluator struct {
numReady uint64 // Number of addrConns in ready state.
numConnecting uint64 // Number of addrConns in connecting state.
numTransientFailure uint64 // Number of addrConns in transientFailure.
}
// recordTransition records state change happening in every subConn and based on
// that it evaluates what aggregated state should be.
// It can only transition between Ready, Connecting and TransientFailure. Other states,
// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection
// before any subConn is created ClientConn is in idle state. In the end when ClientConn
// closes it is in Shutdown state.
//
// recordTransition should only be called synchronously from the same goroutine.
func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
// Update counters.
for idx, state := range []connectivity.State{oldState, newState} {
updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
switch state {
case connectivity.Ready:
cse.numReady += updateVal
case connectivity.Connecting:
cse.numConnecting += updateVal
case connectivity.TransientFailure:
cse.numTransientFailure += updateVal
}
}
// Evaluate.
if cse.numReady > 0 {
return connectivity.Ready
}
if cse.numConnecting > 0 {
return connectivity.Connecting
}
return connectivity.TransientFailure
}


@ -45,8 +45,20 @@ type PickerBuilder interface {
// NewBalancerBuilder returns a balancer builder. The balancers
// built by this builder will use the picker builder to build pickers.
func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
return NewBalancerBuilderWithConfig(name, pb, Config{})
}
// Config contains the config info about the base balancer builder.
type Config struct {
// HealthCheck indicates whether health checking should be enabled for this specific balancer.
HealthCheck bool
}
// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config.
func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder {
return &baseBuilder{
name: name,
pickerBuilder: pb,
config: config,
}
}
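NewBalancerBuilderWithConfig plus balancer.Register is all a picker-based policy needs; the round_robin change in the next hunk uses exactly this pattern to turn health checking on. As an illustration, a sketch of a hypothetical "first_ready" policy against the v1.18.0 interfaces (the package and policy name are made up):

```go
package firstready

import (
	"context"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/resolver"
)

// Name is a made-up policy name used only for this sketch.
const Name = "first_ready"

func init() {
	balancer.Register(base.NewBalancerBuilderWithConfig(Name, &pickerBuilder{}, base.Config{HealthCheck: true}))
}

type pickerBuilder struct{}

// Build is called with the currently READY SubConns and returns the picker
// gRPC will use until the ready set changes again.
func (*pickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
	for _, sc := range readySCs {
		return &picker{sc: sc} // arbitrarily keep the first ready SubConn
	}
	return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
}

type picker struct{ sc balancer.SubConn }

func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	return p.sc, nil, nil
}
```

A client could then opt into such a policy by name, the same way round_robin is selected (for example via the WithBalancerName dial option available in this release).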


@ -22,9 +22,9 @@
package roundrobin
import (
"context"
"sync"
"golang.org/x/net/context"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/base"
"google.golang.org/grpc/grpclog"
@ -36,7 +36,7 @@ const Name = "round_robin"
// newBuilder creates a new roundrobin balancer builder.
func newBuilder() balancer.Builder {
return base.NewBalancerBuilder(Name, &rrPickerBuilder{})
return base.NewBalancerBuilderWithConfig(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
}
func init() {


@ -178,6 +178,28 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
}
func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) {
if ccb.cc.curBalancerName != grpclbName {
var containsGRPCLB bool
for _, a := range addrs {
if a.Type == resolver.GRPCLB {
containsGRPCLB = true
break
}
}
if containsGRPCLB {
// The current balancer is not grpclb, but addresses contain grpclb
// address. This means we failed to switch to grpclb, most likely
// because grpclb is not registered. Filter out all grpclb addresses
// from addrs before sending to balancer.
tempAddrs := make([]resolver.Address, 0, len(addrs))
for _, a := range addrs {
if a.Type != resolver.GRPCLB {
tempAddrs = append(tempAddrs, a)
}
}
addrs = tempAddrs
}
}
select {
case <-ccb.resolverUpdateCh:
default:
@ -197,7 +219,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
if ccb.subConns == nil {
return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
}
ac, err := ccb.cc.newAddrConn(addrs)
ac, err := ccb.cc.newAddrConn(addrs, opts)
if err != nil {
return nil, err
}
@ -229,8 +251,13 @@ func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balanc
if ccb.subConns == nil {
return
}
ccb.cc.csMgr.updateState(s)
// Update picker before updating state. Even though the ordering here does
// not matter, it can lead to multiple calls of Pick in the common start-up
// case where we wait for ready and then perform an RPC. If the picker is
// updated later, we could call the "connecting" picker when the state is
// updated, and then call the "ready" picker after the picker gets updated.
ccb.cc.blockingpicker.updatePicker(p)
ccb.cc.csMgr.updateState(s)
}
func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) {
@ -257,6 +284,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
}
if !acbw.ac.tryUpdateAddrs(addrs) {
cc := acbw.ac.cc
opts := acbw.ac.scopts
acbw.ac.mu.Lock()
// Set old ac.acbw to nil so the Shutdown state update will be ignored
// by balancer.
@ -272,7 +300,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
return
}
ac, err := cc.newAddrConn(addrs)
ac, err := cc.newAddrConn(addrs, opts)
if err != nil {
grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
return


@ -19,16 +19,14 @@
package grpc
import (
"context"
"strings"
"sync"
"golang.org/x/net/context"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/status"
)
type balancerWrapperBuilder struct {
@ -315,12 +313,12 @@ func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions)
Metadata: a.Metadata,
}]
if !ok && failfast {
return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
return nil, nil, balancer.ErrTransientFailure
}
if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) {
// If the returned sc is not ready and RPC is failfast,
// return error, and this RPC will fail.
return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
return nil, nil, balancer.ErrTransientFailure
}
}


@ -0,0 +1,900 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto
package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import duration "github.com/golang/protobuf/ptypes/duration"
import timestamp "github.com/golang/protobuf/ptypes/timestamp"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Enumerates the type of event
// Note the terminology is different from the RPC semantics
// definition, but the same meaning is expressed here.
type GrpcLogEntry_EventType int32
const (
GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0
// Header sent from client to server
GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1
// Header sent from server to client
GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2
// Message sent from client to server
GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3
// Message sent from server to client
GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4
// A signal that client is done sending
GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5
// Trailer indicates the end of the RPC.
// On client side, this event means a trailer was either received
// from the network or the gRPC library locally generated a status
// to inform the application about a failure.
// On server side, this event means the server application requested
// to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after
// this due to races on server side.
GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6
// A signal that the RPC is cancelled. On client side, this
// indicates the client application requests a cancellation.
// On server side, this indicates that cancellation was detected.
// Note: This marks the end of the RPC. Events may arrive after
// this due to races. For example, on client side a trailer
// may arrive even though the application requested to cancel the RPC.
GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7
)
var GrpcLogEntry_EventType_name = map[int32]string{
0: "EVENT_TYPE_UNKNOWN",
1: "EVENT_TYPE_CLIENT_HEADER",
2: "EVENT_TYPE_SERVER_HEADER",
3: "EVENT_TYPE_CLIENT_MESSAGE",
4: "EVENT_TYPE_SERVER_MESSAGE",
5: "EVENT_TYPE_CLIENT_HALF_CLOSE",
6: "EVENT_TYPE_SERVER_TRAILER",
7: "EVENT_TYPE_CANCEL",
}
var GrpcLogEntry_EventType_value = map[string]int32{
"EVENT_TYPE_UNKNOWN": 0,
"EVENT_TYPE_CLIENT_HEADER": 1,
"EVENT_TYPE_SERVER_HEADER": 2,
"EVENT_TYPE_CLIENT_MESSAGE": 3,
"EVENT_TYPE_SERVER_MESSAGE": 4,
"EVENT_TYPE_CLIENT_HALF_CLOSE": 5,
"EVENT_TYPE_SERVER_TRAILER": 6,
"EVENT_TYPE_CANCEL": 7,
}
func (x GrpcLogEntry_EventType) String() string {
return proto.EnumName(GrpcLogEntry_EventType_name, int32(x))
}
func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0}
}
// Enumerates the entity that generates the log entry
type GrpcLogEntry_Logger int32
const (
GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0
GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1
GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2
)
var GrpcLogEntry_Logger_name = map[int32]string{
0: "LOGGER_UNKNOWN",
1: "LOGGER_CLIENT",
2: "LOGGER_SERVER",
}
var GrpcLogEntry_Logger_value = map[string]int32{
"LOGGER_UNKNOWN": 0,
"LOGGER_CLIENT": 1,
"LOGGER_SERVER": 2,
}
func (x GrpcLogEntry_Logger) String() string {
return proto.EnumName(GrpcLogEntry_Logger_name, int32(x))
}
func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1}
}
type Address_Type int32
const (
Address_TYPE_UNKNOWN Address_Type = 0
// address is in 1.2.3.4 form
Address_TYPE_IPV4 Address_Type = 1
// address is in IPv6 canonical form (RFC5952 section 4)
// The scope is NOT included in the address string.
Address_TYPE_IPV6 Address_Type = 2
// address is UDS string
Address_TYPE_UNIX Address_Type = 3
)
var Address_Type_name = map[int32]string{
0: "TYPE_UNKNOWN",
1: "TYPE_IPV4",
2: "TYPE_IPV6",
3: "TYPE_UNIX",
}
var Address_Type_value = map[string]int32{
"TYPE_UNKNOWN": 0,
"TYPE_IPV4": 1,
"TYPE_IPV6": 2,
"TYPE_UNIX": 3,
}
func (x Address_Type) String() string {
return proto.EnumName(Address_Type_name, int32(x))
}
func (Address_Type) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0}
}
// Log entry we store in binary logs
type GrpcLogEntry struct {
// The timestamp of the binary log message
Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
// Uniquely identifies a call. The value must not be 0 in order to disambiguate
// from an unset value.
// Each call may have several log entries, they will all have the same call_id.
// Nothing is guaranteed about their value other than they are unique across
// different RPCs in the same gRPC process.
CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"`
// The entry sequence id for this call. The first GrpcLogEntry has a
// value of 1, to disambiguate from an unset value. The purpose of
// this field is to detect missing entries in environments where
// durability or ordering is not guaranteed.
SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"`
Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"`
Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"`
// The logger uses one of the following fields to record the payload,
// according to the type of the log entry.
//
// Types that are valid to be assigned to Payload:
// *GrpcLogEntry_ClientHeader
// *GrpcLogEntry_ServerHeader
// *GrpcLogEntry_Message
// *GrpcLogEntry_Trailer
Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"`
// true if payload does not represent the full message or metadata.
PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"`
// Peer address information, will only be recorded on the first
// incoming event. On client side, peer is logged on
// EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in
// the case of trailers-only. On server side, peer is always
// logged on EVENT_TYPE_CLIENT_HEADER.
Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} }
func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) }
func (*GrpcLogEntry) ProtoMessage() {}
func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{0}
}
func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b)
}
func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic)
}
func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) {
xxx_messageInfo_GrpcLogEntry.Merge(dst, src)
}
func (m *GrpcLogEntry) XXX_Size() int {
return xxx_messageInfo_GrpcLogEntry.Size(m)
}
func (m *GrpcLogEntry) XXX_DiscardUnknown() {
xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m)
}
var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo
func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp {
if m != nil {
return m.Timestamp
}
return nil
}
func (m *GrpcLogEntry) GetCallId() uint64 {
if m != nil {
return m.CallId
}
return 0
}
func (m *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
if m != nil {
return m.SequenceIdWithinCall
}
return 0
}
func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
if m != nil {
return m.Type
}
return GrpcLogEntry_EVENT_TYPE_UNKNOWN
}
func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
if m != nil {
return m.Logger
}
return GrpcLogEntry_LOGGER_UNKNOWN
}
type isGrpcLogEntry_Payload interface {
isGrpcLogEntry_Payload()
}
type GrpcLogEntry_ClientHeader struct {
ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"`
}
type GrpcLogEntry_ServerHeader struct {
ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"`
}
type GrpcLogEntry_Message struct {
Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"`
}
type GrpcLogEntry_Trailer struct {
Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"`
}
func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {}
func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {}
func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {}
func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
if m != nil {
return m.Payload
}
return nil
}
func (m *GrpcLogEntry) GetClientHeader() *ClientHeader {
if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
return x.ClientHeader
}
return nil
}
func (m *GrpcLogEntry) GetServerHeader() *ServerHeader {
if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
return x.ServerHeader
}
return nil
}
func (m *GrpcLogEntry) GetMessage() *Message {
if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok {
return x.Message
}
return nil
}
func (m *GrpcLogEntry) GetTrailer() *Trailer {
if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok {
return x.Trailer
}
return nil
}
func (m *GrpcLogEntry) GetPayloadTruncated() bool {
if m != nil {
return m.PayloadTruncated
}
return false
}
func (m *GrpcLogEntry) GetPeer() *Address {
if m != nil {
return m.Peer
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{
(*GrpcLogEntry_ClientHeader)(nil),
(*GrpcLogEntry_ServerHeader)(nil),
(*GrpcLogEntry_Message)(nil),
(*GrpcLogEntry_Trailer)(nil),
}
}
func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*GrpcLogEntry)
// payload
switch x := m.Payload.(type) {
case *GrpcLogEntry_ClientHeader:
b.EncodeVarint(6<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.ClientHeader); err != nil {
return err
}
case *GrpcLogEntry_ServerHeader:
b.EncodeVarint(7<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.ServerHeader); err != nil {
return err
}
case *GrpcLogEntry_Message:
b.EncodeVarint(8<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Message); err != nil {
return err
}
case *GrpcLogEntry_Trailer:
b.EncodeVarint(9<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Trailer); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x)
}
return nil
}
func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*GrpcLogEntry)
switch tag {
case 6: // payload.client_header
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ClientHeader)
err := b.DecodeMessage(msg)
m.Payload = &GrpcLogEntry_ClientHeader{msg}
return true, err
case 7: // payload.server_header
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ServerHeader)
err := b.DecodeMessage(msg)
m.Payload = &GrpcLogEntry_ServerHeader{msg}
return true, err
case 8: // payload.message
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(Message)
err := b.DecodeMessage(msg)
m.Payload = &GrpcLogEntry_Message{msg}
return true, err
case 9: // payload.trailer
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(Trailer)
err := b.DecodeMessage(msg)
m.Payload = &GrpcLogEntry_Trailer{msg}
return true, err
default:
return false, nil
}
}
func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) {
m := msg.(*GrpcLogEntry)
// payload
switch x := m.Payload.(type) {
case *GrpcLogEntry_ClientHeader:
s := proto.Size(x.ClientHeader)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case *GrpcLogEntry_ServerHeader:
s := proto.Size(x.ServerHeader)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case *GrpcLogEntry_Message:
s := proto.Size(x.Message)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case *GrpcLogEntry_Trailer:
s := proto.Size(x.Trailer)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type ClientHeader struct {
// This contains only the metadata from the application.
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
// The name of the RPC method, which looks something like:
// /<service>/<method>
// Note the leading "/" character.
MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
// A single process may be used to run multiple virtual
// servers with different identities.
// The authority is the name of such a server identity.
// It is typically a portion of the URI in the form of
// <host> or <host>:<port> .
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
// the RPC timeout
Timeout *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ClientHeader) Reset() { *m = ClientHeader{} }
func (m *ClientHeader) String() string { return proto.CompactTextString(m) }
func (*ClientHeader) ProtoMessage() {}
func (*ClientHeader) Descriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{1}
}
func (m *ClientHeader) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ClientHeader.Unmarshal(m, b)
}
func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic)
}
func (dst *ClientHeader) XXX_Merge(src proto.Message) {
xxx_messageInfo_ClientHeader.Merge(dst, src)
}
func (m *ClientHeader) XXX_Size() int {
return xxx_messageInfo_ClientHeader.Size(m)
}
func (m *ClientHeader) XXX_DiscardUnknown() {
xxx_messageInfo_ClientHeader.DiscardUnknown(m)
}
var xxx_messageInfo_ClientHeader proto.InternalMessageInfo
func (m *ClientHeader) GetMetadata() *Metadata {
if m != nil {
return m.Metadata
}
return nil
}
func (m *ClientHeader) GetMethodName() string {
if m != nil {
return m.MethodName
}
return ""
}
func (m *ClientHeader) GetAuthority() string {
if m != nil {
return m.Authority
}
return ""
}
func (m *ClientHeader) GetTimeout() *duration.Duration {
if m != nil {
return m.Timeout
}
return nil
}
type ServerHeader struct {
// This contains only the metadata from the application.
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ServerHeader) Reset() { *m = ServerHeader{} }
func (m *ServerHeader) String() string { return proto.CompactTextString(m) }
func (*ServerHeader) ProtoMessage() {}
func (*ServerHeader) Descriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{2}
}
func (m *ServerHeader) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ServerHeader.Unmarshal(m, b)
}
func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic)
}
func (dst *ServerHeader) XXX_Merge(src proto.Message) {
xxx_messageInfo_ServerHeader.Merge(dst, src)
}
func (m *ServerHeader) XXX_Size() int {
return xxx_messageInfo_ServerHeader.Size(m)
}
func (m *ServerHeader) XXX_DiscardUnknown() {
xxx_messageInfo_ServerHeader.DiscardUnknown(m)
}
var xxx_messageInfo_ServerHeader proto.InternalMessageInfo
func (m *ServerHeader) GetMetadata() *Metadata {
if m != nil {
return m.Metadata
}
return nil
}
type Trailer struct {
// This contains only the metadata from the application.
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
// The gRPC status code.
StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"`
// An original status message before any transport specific
// encoding.
StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"`
// The value of the 'grpc-status-details-bin' metadata key. If
// present, this is always an encoded 'google.rpc.Status' message.
StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Trailer) Reset() { *m = Trailer{} }
func (m *Trailer) String() string { return proto.CompactTextString(m) }
func (*Trailer) ProtoMessage() {}
func (*Trailer) Descriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{3}
}
func (m *Trailer) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Trailer.Unmarshal(m, b)
}
func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Trailer.Marshal(b, m, deterministic)
}
func (dst *Trailer) XXX_Merge(src proto.Message) {
xxx_messageInfo_Trailer.Merge(dst, src)
}
func (m *Trailer) XXX_Size() int {
return xxx_messageInfo_Trailer.Size(m)
}
func (m *Trailer) XXX_DiscardUnknown() {
xxx_messageInfo_Trailer.DiscardUnknown(m)
}
var xxx_messageInfo_Trailer proto.InternalMessageInfo
func (m *Trailer) GetMetadata() *Metadata {
if m != nil {
return m.Metadata
}
return nil
}
func (m *Trailer) GetStatusCode() uint32 {
if m != nil {
return m.StatusCode
}
return 0
}
func (m *Trailer) GetStatusMessage() string {
if m != nil {
return m.StatusMessage
}
return ""
}
func (m *Trailer) GetStatusDetails() []byte {
if m != nil {
return m.StatusDetails
}
return nil
}
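The generated types above are plain protobuf messages, so entries can be assembled and serialized by hand. A sketch (method, authority, and status values are made up) that builds one client-header entry and a matching server trailer:

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

func main() {
	header := &pb.GrpcLogEntry{
		Timestamp:            ptypes.TimestampNow(),
		CallId:               1, // must not be 0, per the field comment
		SequenceIdWithinCall: 1, // first entry of this call
		Type:                 pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
		Logger:               pb.GrpcLogEntry_LOGGER_CLIENT,
		Payload: &pb.GrpcLogEntry_ClientHeader{ClientHeader: &pb.ClientHeader{
			MethodName: "/helloworld.Greeter/SayHello", // note the leading "/"
			Authority:  "example.com:443",
		}},
	}
	trailer := &pb.GrpcLogEntry{
		Timestamp:            ptypes.TimestampNow(),
		CallId:               1,
		SequenceIdWithinCall: 2,
		Type:                 pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
		Logger:               pb.GrpcLogEntry_LOGGER_CLIENT,
		Payload: &pb.GrpcLogEntry_Trailer{Trailer: &pb.Trailer{
			StatusCode:    5, // NOT_FOUND, per google/rpc/code.proto above
			StatusMessage: "no such greeting",
		}},
	}
	for _, e := range []*pb.GrpcLogEntry{header, trailer} {
		raw, err := proto.Marshal(e)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%v: %d bytes\n", e.GetType(), len(raw))
	}
}
```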
// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE
type Message struct {
// Length of the message. It may not be the same as the length of the
// data field, as the logging payload can be truncated or omitted.
Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"`
// May be truncated or omitted.
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Message) Reset() { *m = Message{} }
func (m *Message) String() string { return proto.CompactTextString(m) }
func (*Message) ProtoMessage() {}
func (*Message) Descriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{4}
}
func (m *Message) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Message.Unmarshal(m, b)
}
func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Message.Marshal(b, m, deterministic)
}
func (dst *Message) XXX_Merge(src proto.Message) {
xxx_messageInfo_Message.Merge(dst, src)
}
func (m *Message) XXX_Size() int {
return xxx_messageInfo_Message.Size(m)
}
func (m *Message) XXX_DiscardUnknown() {
xxx_messageInfo_Message.DiscardUnknown(m)
}
var xxx_messageInfo_Message proto.InternalMessageInfo
func (m *Message) GetLength() uint32 {
if m != nil {
return m.Length
}
return 0
}
func (m *Message) GetData() []byte {
if m != nil {
return m.Data
}
return nil
}
// A list of metadata pairs, used in the payload of client header,
// server header, and server trailer.
// Implementations may omit some entries to honor the header limits
// of GRPC_BINARY_LOG_CONFIG.
//
// Header keys added by gRPC are omitted. To be more specific,
// implementations will not log the following entries, and this is
// not to be treated as a truncation:
// - entries handled by grpc that are not user visible, such as those
// that begin with 'grpc-' (with exception of grpc-trace-bin)
// or keys like 'lb-token'
// - transport specific entries, including but not limited to:
// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
// - entries added for call credentials
//
// Implementations must always log grpc-trace-bin if it is present.
// Practically speaking it will only be visible on server side because
// grpc-trace-bin is managed by low level client side mechanisms
// inaccessible from the application level. On server side, the
// header is just a normal metadata key.
// The pair will not count towards the size limit.
type Metadata struct {
Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Metadata) Reset() { *m = Metadata{} }
func (m *Metadata) String() string { return proto.CompactTextString(m) }
func (*Metadata) ProtoMessage() {}
func (*Metadata) Descriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{5}
}
func (m *Metadata) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Metadata.Unmarshal(m, b)
}
func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
}
func (dst *Metadata) XXX_Merge(src proto.Message) {
xxx_messageInfo_Metadata.Merge(dst, src)
}
func (m *Metadata) XXX_Size() int {
return xxx_messageInfo_Metadata.Size(m)
}
func (m *Metadata) XXX_DiscardUnknown() {
xxx_messageInfo_Metadata.DiscardUnknown(m)
}
var xxx_messageInfo_Metadata proto.InternalMessageInfo
func (m *Metadata) GetEntry() []*MetadataEntry {
if m != nil {
return m.Entry
}
return nil
}
// A metadata key value pair
type MetadataEntry struct {
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *MetadataEntry) Reset() { *m = MetadataEntry{} }
func (m *MetadataEntry) String() string { return proto.CompactTextString(m) }
func (*MetadataEntry) ProtoMessage() {}
func (*MetadataEntry) Descriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{6}
}
func (m *MetadataEntry) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MetadataEntry.Unmarshal(m, b)
}
func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic)
}
func (dst *MetadataEntry) XXX_Merge(src proto.Message) {
xxx_messageInfo_MetadataEntry.Merge(dst, src)
}
func (m *MetadataEntry) XXX_Size() int {
return xxx_messageInfo_MetadataEntry.Size(m)
}
func (m *MetadataEntry) XXX_DiscardUnknown() {
xxx_messageInfo_MetadataEntry.DiscardUnknown(m)
}
var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo
func (m *MetadataEntry) GetKey() string {
if m != nil {
return m.Key
}
return ""
}
func (m *MetadataEntry) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
// Address information
type Address struct {
Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
// only for TYPE_IPV4 and TYPE_IPV6
IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Address) Reset() { *m = Address{} }
func (m *Address) String() string { return proto.CompactTextString(m) }
func (*Address) ProtoMessage() {}
func (*Address) Descriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{7}
}
func (m *Address) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Address.Unmarshal(m, b)
}
func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Address.Marshal(b, m, deterministic)
}
func (dst *Address) XXX_Merge(src proto.Message) {
xxx_messageInfo_Address.Merge(dst, src)
}
func (m *Address) XXX_Size() int {
return xxx_messageInfo_Address.Size(m)
}
func (m *Address) XXX_DiscardUnknown() {
xxx_messageInfo_Address.DiscardUnknown(m)
}
var xxx_messageInfo_Address proto.InternalMessageInfo
func (m *Address) GetType() Address_Type {
if m != nil {
return m.Type
}
return Address_TYPE_UNKNOWN
}
func (m *Address) GetAddress() string {
if m != nil {
return m.Address
}
return ""
}
func (m *Address) GetIpPort() uint32 {
if m != nil {
return m.IpPort
}
return 0
}
func init() {
proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry")
proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader")
proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader")
proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer")
proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message")
proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata")
proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry")
proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address")
proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value)
proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value)
proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value)
}
func init() {
proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911)
}
var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{
// 900 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44,
0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04,
0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d,
0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c,
0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf,
0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2,
0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09,
0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e,
0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef,
0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36,
0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5,
0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46,
0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84,
0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72,
0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa,
0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb,
0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84,
0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1,
0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c,
0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24,
0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba,
0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8,
0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5,
0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1,
0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94,
0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f,
0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec,
0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b,
0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1,
0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5,
0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b,
0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d,
0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42,
0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4,
0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd,
0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51,
0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01,
0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58,
0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5,
0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff,
0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26,
0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23,
0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44,
0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46,
0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf,
0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab,
0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32,
0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49,
0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb,
0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c,
0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0,
0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed,
0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 0x28, 0x75, 0x7f,
0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7,
0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e,
0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50,
0xd4, 0x07, 0x00, 0x00,
}

View File

@ -19,7 +19,7 @@
package grpc
import (
"golang.org/x/net/context"
"context"
)
// Invoke sends the RPC request on the wire and returns after response is

File diff suppressed because it is too large.

View File

@ -22,7 +22,8 @@
package connectivity
import (
"golang.org/x/net/context"
"context"
"google.golang.org/grpc/grpclog"
)

View File

@ -23,6 +23,7 @@
package credentials // import "google.golang.org/grpc/credentials"
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
@ -32,7 +33,7 @@ import (
"strings"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/grpc/credentials/internal"
)
// alpnProtoStr are the specified application level protocols for gRPC.
@ -108,6 +109,25 @@ type TransportCredentials interface {
OverrideServerName(string) error
}
// Bundle is a combination of TransportCredentials and PerRPCCredentials.
//
// It also contains a mode switching method, so it can be used as a combination
// of different credential policies.
//
// Bundle cannot be used together with individual TransportCredentials.
// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials.
//
// This API is experimental.
type Bundle interface {
TransportCredentials() TransportCredentials
PerRPCCredentials() PerRPCCredentials
// NewWithMode should make a copy of Bundle, and switch mode. Modifying the
// existing Bundle may cause races.
//
// NewWithMode returns nil if the requested mode is not supported.
NewWithMode(mode string) (Bundle, error)
}
// TLSInfo contains the auth information for a TLS authenticated connection.
// It implements the AuthInfo interface.
type TLSInfo struct {
@ -119,8 +139,8 @@ func (t TLSInfo) AuthType() string {
return "tls"
}
// GetChannelzSecurityValue returns security info requested by channelz.
func (t TLSInfo) GetChannelzSecurityValue() ChannelzSecurityValue {
// GetSecurityValue returns security info requested by channelz.
func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
v := &TLSChannelzSecurityValue{
StandardName: cipherSuiteLookup[t.State.CipherSuite],
}
@ -168,7 +188,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon
case <-ctx.Done():
return nil, nil, ctx.Err()
}
return tlsConn{Conn: conn, rawConn: rawConn}, TLSInfo{conn.ConnectionState()}, nil
return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
}
func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
@ -176,7 +196,7 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
if err := conn.Handshake(); err != nil {
return nil, nil, err
}
return tlsConn{Conn: conn, rawConn: rawConn}, TLSInfo{conn.ConnectionState()}, nil
return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
}
func (c *tlsCreds) Clone() TransportCredentials {
@ -266,11 +286,6 @@ type OtherChannelzSecurityValue struct {
func (*OtherChannelzSecurityValue) isChannelzSecurityValue() {}
type tlsConn struct {
*tls.Conn
rawConn net.Conn
}
var cipherSuiteLookup = map[uint16]string{
tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
@ -290,4 +305,24 @@ var cipherSuiteLookup = map[uint16]string{
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV",
tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256",
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
}
// cloneTLSConfig returns a shallow clone of the exported
// fields of cfg, ignoring the unexported sync.Once, which
// contains a mutex and must not be copied.
//
// If cfg is nil, a new zero tls.Config is returned.
//
// TODO: inline this function if possible.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
}
return cfg.Clone()
}
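
For orientation, a minimal sketch of what an implementation of the new credentials.Bundle interface could look like. The package name (bundleexample), the tlsOnlyBundle type and its single "tls" mode are assumptions made for this illustration; only the interface itself comes from the vendored code.

package bundleexample

import (
	"google.golang.org/grpc/credentials"
)

// tlsOnlyBundle carries TLS transport credentials and no per-RPC credentials.
type tlsOnlyBundle struct {
	tc credentials.TransportCredentials
}

func (b *tlsOnlyBundle) TransportCredentials() credentials.TransportCredentials {
	return b.tc
}

func (b *tlsOnlyBundle) PerRPCCredentials() credentials.PerRPCCredentials {
	return nil
}

// NewWithMode copies the bundle rather than mutating it, as the doc comment
// above requires; the "tls" mode name is hypothetical.
func (b *tlsOnlyBundle) NewWithMode(mode string) (credentials.Bundle, error) {
	if mode != "tls" {
		return nil, nil // a nil Bundle signals that the mode is not supported
	}
	return &tlsOnlyBundle{tc: b.tc.Clone()}, nil
}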

View File

@ -1,57 +0,0 @@
// +build !go1.7
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package credentials
import (
"crypto/tls"
)
// cloneTLSConfig returns a shallow clone of the exported
// fields of cfg, ignoring the unexported sync.Once, which
// contains a mutex and must not be copied.
//
// If cfg is nil, a new zero tls.Config is returned.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
}
return &tls.Config{
Rand: cfg.Rand,
Time: cfg.Time,
Certificates: cfg.Certificates,
NameToCertificate: cfg.NameToCertificate,
GetCertificate: cfg.GetCertificate,
RootCAs: cfg.RootCAs,
NextProtos: cfg.NextProtos,
ServerName: cfg.ServerName,
ClientAuth: cfg.ClientAuth,
ClientCAs: cfg.ClientCAs,
InsecureSkipVerify: cfg.InsecureSkipVerify,
CipherSuites: cfg.CipherSuites,
PreferServerCipherSuites: cfg.PreferServerCipherSuites,
SessionTicketsDisabled: cfg.SessionTicketsDisabled,
SessionTicketKey: cfg.SessionTicketKey,
ClientSessionCache: cfg.ClientSessionCache,
MinVersion: cfg.MinVersion,
MaxVersion: cfg.MaxVersion,
CurvePreferences: cfg.CurvePreferences,
}
}

View File

@ -1,59 +0,0 @@
// +build go1.7,!go1.8
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package credentials
import (
"crypto/tls"
)
// cloneTLSConfig returns a shallow clone of the exported
// fields of cfg, ignoring the unexported sync.Once, which
// contains a mutex and must not be copied.
//
// If cfg is nil, a new zero tls.Config is returned.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
}
return &tls.Config{
Rand: cfg.Rand,
Time: cfg.Time,
Certificates: cfg.Certificates,
NameToCertificate: cfg.NameToCertificate,
GetCertificate: cfg.GetCertificate,
RootCAs: cfg.RootCAs,
NextProtos: cfg.NextProtos,
ServerName: cfg.ServerName,
ClientAuth: cfg.ClientAuth,
ClientCAs: cfg.ClientCAs,
InsecureSkipVerify: cfg.InsecureSkipVerify,
CipherSuites: cfg.CipherSuites,
PreferServerCipherSuites: cfg.PreferServerCipherSuites,
SessionTicketsDisabled: cfg.SessionTicketsDisabled,
SessionTicketKey: cfg.SessionTicketKey,
ClientSessionCache: cfg.ClientSessionCache,
MinVersion: cfg.MinVersion,
MaxVersion: cfg.MaxVersion,
CurvePreferences: cfg.CurvePreferences,
DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled,
Renegotiation: cfg.Renegotiation,
}
}

View File

@ -1,46 +0,0 @@
// +build go1.8
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package credentials
import (
"crypto/tls"
)
func init() {
cipherSuiteLookup[tls.TLS_RSA_WITH_AES_128_CBC_SHA256] = "TLS_RSA_WITH_AES_128_CBC_SHA256"
cipherSuiteLookup[tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"
cipherSuiteLookup[tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"
cipherSuiteLookup[tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305] = "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
cipherSuiteLookup[tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305] = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305"
}
// cloneTLSConfig returns a shallow clone of the exported
// fields of cfg, ignoring the unexported sync.Once, which
// contains a mutex and must not be copied.
//
// If cfg is nil, a new zero tls.Config is returned.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
}
return cfg.Clone()
}

View File

@ -0,0 +1,61 @@
// +build !appengine
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package internal contains credentials-internal code.
package internal
import (
"net"
"syscall"
)
type sysConn = syscall.Conn
// syscallConn keeps a reference to rawConn to support syscall.Conn for channelz.
// SyscallConn() (the method in interface syscall.Conn) is explicitly
// implemented on this type.
//
// Interface syscall.Conn is implemented by most net.Conn implementations (e.g.
// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns
// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn
// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't
// help here).
type syscallConn struct {
net.Conn
// sysConn is a type alias of syscall.Conn. It's necessary because the name
// `Conn` collides with `net.Conn`.
sysConn
}
// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that
// implements syscall.Conn. rawConn will be used to support syscall, and newConn
// will be used for read/write.
//
// This function returns newConn if rawConn doesn't implement syscall.Conn.
func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
sysConn, ok := rawConn.(syscall.Conn)
if !ok {
return newConn
}
return &syscallConn{
Conn: newConn,
sysConn: sysConn,
}
}
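
The snippet below restates the wrapping behaviour locally so it can stand alone (the vendored package is internal to the grpc module and cannot be imported from user code); the wrappedConn, wrap and tlsOverTCP names are invented for this sketch. It shows why the wrapper matters: *tls.Conn does not implement syscall.Conn, but the raw *net.TCPConn underneath it does.

package syscallwrapsketch

import (
	"crypto/tls"
	"net"
	"syscall"
)

// sysConn aliases syscall.Conn so it can be embedded next to net.Conn without
// a name collision, mirroring the vendored type above.
type sysConn = syscall.Conn

type wrappedConn struct {
	net.Conn // reads and writes go to the new (e.g. TLS) conn
	sysConn  // SyscallConn goes to the raw conn
}

// wrap returns newConn unchanged unless rawConn implements syscall.Conn.
func wrap(rawConn, newConn net.Conn) net.Conn {
	sc, ok := rawConn.(syscall.Conn)
	if !ok {
		return newConn
	}
	return &wrappedConn{Conn: newConn, sysConn: sc}
}

// tlsOverTCP wraps a TLS client conn over a TCP conn; the returned value still
// satisfies syscall.Conn, so channelz-style code can reach the raw socket.
func tlsOverTCP(tcp *net.TCPConn, cfg *tls.Config) (net.Conn, bool) {
	c := wrap(tcp, tls.Client(tcp, cfg))
	_, ok := c.(syscall.Conn)
	return c, ok // true: *net.TCPConn implements syscall.Conn
}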

View File

@ -1,8 +1,8 @@
// +build go1.8
// +build appengine
/*
*
* Copyright 2017 gRPC authors.
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -18,11 +18,13 @@
*
*/
package naming
package internal
import "net"
var (
lookupHost = net.DefaultResolver.LookupHost
lookupSRV = net.DefaultResolver.LookupSRV
import (
"net"
)
// WrapSyscallConn returns newConn on appengine.
func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
return newConn
}

View File

@ -19,11 +19,11 @@
package grpc
import (
"context"
"fmt"
"net"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal"
@ -55,10 +55,12 @@ type dialOptions struct {
balancerBuilder balancer.Builder
// This is to support grpclb.
resolverBuilder resolver.Builder
waitForHandshake bool
reqHandshake envconfig.RequireHandshakeSetting
channelzParentID int64
disableServiceConfig bool
disableRetry bool
disableHealthCheck bool
healthCheckFunc internal.HealthChecker
}
// DialOption configures how we set up the connection.
@ -91,10 +93,13 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
}
// WithWaitForHandshake blocks until the initial settings frame is received from
// the server before assigning RPCs to the connection. Experimental API.
// the server before assigning RPCs to the connection.
//
// Deprecated: this is the default behavior, and this option will be removed
// after the 1.18 release.
func WithWaitForHandshake() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.waitForHandshake = true
o.reqHandshake = envconfig.RequireHandshakeOn
})
}
@ -286,7 +291,8 @@ func WithInsecure() DialOption {
}
// WithTransportCredentials returns a DialOption which configures a connection
// level security credentials (e.g., TLS/SSL).
// level security credentials (e.g., TLS/SSL). This should not be used together
// with WithCredentialsBundle.
func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.TransportCredentials = creds
@ -301,6 +307,17 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
})
}
// WithCredentialsBundle returns a DialOption to set a credentials bundle on
// the ClientConn. This should not be used together with
// WithTransportCredentials.
//
// This API is experimental.
func WithCredentialsBundle(b credentials.Bundle) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.CredsBundle = b
})
}
// WithTimeout returns a DialOption that configures a timeout for dialing a
// ClientConn initially. This is valid if and only if WithBlock() is present.
//
@ -320,6 +337,7 @@ func withContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp
func init() {
internal.WithContextDialer = withContextDialer
internal.WithResolverBuilder = withResolverBuilder
internal.WithHealthCheckFunc = withHealthCheckFunc
}
// WithDialer returns a DialOption that specifies a function to use for dialing
@ -349,6 +367,9 @@ func WithStatsHandler(h stats.Handler) DialOption {
// error, gRPC will fail the connection to the network address and won't try to
// reconnect. The default value of FailOnNonTempDialError is false.
//
// FailOnNonTempDialError only affects the initial dial, and does not do
// anything useful unless you are also using WithBlock().
//
// This is an EXPERIMENTAL API.
func FailOnNonTempDialError(f bool) DialOption {
return newFuncDialOption(func(o *dialOptions) {
@ -439,9 +460,30 @@ func WithMaxHeaderListSize(s uint32) DialOption {
})
}
// WithDisableHealthCheck disables the LB channel health checking for all SubConns of this ClientConn.
//
// This API is EXPERIMENTAL.
func WithDisableHealthCheck() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.disableHealthCheck = true
})
}
// withHealthCheckFunc replaces the default health check function with the provided one. It makes
// it easier for tests to change the health check function.
//
// For testing purpose only.
func withHealthCheckFunc(f internal.HealthChecker) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.healthCheckFunc = f
})
}
func defaultDialOptions() dialOptions {
return dialOptions{
disableRetry: !envconfig.Retry,
disableRetry: !envconfig.Retry,
reqHandshake: envconfig.RequireHandshake,
healthCheckFunc: internal.HealthCheckFunc,
copts: transport.ConnectOptions{
WriteBufferSize: defaultWriteBufSize,
ReadBufferSize: defaultReadBufSize,
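
A hedged dialing sketch using the options touched in this file: transport credentials and credential bundles are mutually exclusive, and WithDisableHealthCheck opts a ClientConn out of the new client-side health checking. The address and certificate path are placeholders; credentials.NewClientTLSFromFile is an existing helper in the credentials package.

package dialsketch

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func dialTLS(addr, caCertFile string) (*grpc.ClientConn, error) {
	creds, err := credentials.NewClientTLSFromFile(caCertFile, "")
	if err != nil {
		return nil, err
	}
	// Use either WithTransportCredentials or WithCredentialsBundle, never both.
	return grpc.Dial(addr,
		grpc.WithTransportCredentials(creds),
		grpc.WithDisableHealthCheck(), // skip LB channel health checking for this conn
	)
}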

20
vendor/google.golang.org/grpc/go.mod generated vendored Normal file
View File

@ -0,0 +1,20 @@
module google.golang.org/grpc
require (
cloud.google.com/go v0.26.0 // indirect
github.com/client9/misspell v0.3.4
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
github.com/golang/mock v1.1.1
github.com/golang/protobuf v1.2.0
github.com/kisielk/gotool v1.0.0 // indirect
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522
golang.org/x/text v0.3.0 // indirect
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52
google.golang.org/appengine v1.1.0 // indirect
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8
honnef.co/go/tools v0.0.0-20180728063816-88497007e858
)

32
vendor/google.golang.org/grpc/go.sum generated vendored Normal file
View File

@ -0,0 +1,32 @@
cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3 h1:x/bBzNauLQAlE3fLku/xy92Y8QwKX5HZymrMz2IiKFc=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52 h1:JG/0uqcGdTNgq7FdU+61l5Pdmb8putNZlXb65bJBROs=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858 h1:wN+eVZ7U+gqdqkec6C6VXR1OFf9a5Ul9ETzeYsYv20g=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -1,71 +0,0 @@
// +build go1.6,!go1.7
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"fmt"
"io"
"net"
"net/http"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/status"
)
// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
}
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
req.Cancel = ctx.Done()
if err := req.Write(conn); err != nil {
return fmt.Errorf("failed to write the HTTP request: %v", err)
}
return nil
}
// toRPCErr converts an error into an error from the status package.
func toRPCErr(err error) error {
if err == nil || err == io.EOF {
return err
}
if err == io.ErrUnexpectedEOF {
return status.Error(codes.Internal, err.Error())
}
if _, ok := status.FromError(err); ok {
return err
}
switch e := err.(type) {
case transport.ConnectionError:
return status.Error(codes.Unavailable, e.Desc)
default:
switch err {
case context.DeadlineExceeded:
return status.Error(codes.DeadlineExceeded, err.Error())
case context.Canceled:
return status.Error(codes.Canceled, err.Error())
}
}
return status.Error(codes.Unknown, err.Error())
}

View File

@ -1,72 +0,0 @@
// +build go1.7
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"context"
"fmt"
"io"
"net"
"net/http"
netctx "golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/status"
)
// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{}).DialContext(ctx, network, address)
}
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
req = req.WithContext(ctx)
if err := req.Write(conn); err != nil {
return fmt.Errorf("failed to write the HTTP request: %v", err)
}
return nil
}
// toRPCErr converts an error into an error from the status package.
func toRPCErr(err error) error {
if err == nil || err == io.EOF {
return err
}
if err == io.ErrUnexpectedEOF {
return status.Error(codes.Internal, err.Error())
}
if _, ok := status.FromError(err); ok {
return err
}
switch e := err.(type) {
case transport.ConnectionError:
return status.Error(codes.Unavailable, e.Desc)
default:
switch err {
case context.DeadlineExceeded, netctx.DeadlineExceeded:
return status.Error(codes.DeadlineExceeded, err.Error())
case context.Canceled, netctx.Canceled:
return status.Error(codes.Canceled, err.Error())
}
}
return status.Error(codes.Unknown, err.Error())
}

107
vendor/google.golang.org/grpc/health/client.go generated vendored Normal file
View File

@ -0,0 +1,107 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package health
import (
"context"
"fmt"
"io"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/status"
)
const maxDelay = 120 * time.Second
var backoffStrategy = backoff.Exponential{MaxDelay: maxDelay}
var backoffFunc = func(ctx context.Context, retries int) bool {
d := backoffStrategy.Backoff(retries)
timer := time.NewTimer(d)
select {
case <-timer.C:
return true
case <-ctx.Done():
timer.Stop()
return false
}
}
func init() {
internal.HealthCheckFunc = clientHealthCheck
}
func clientHealthCheck(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), service string) error {
tryCnt := 0
retryConnection:
for {
// Backs off if the connection has failed in some way without receiving a message in the previous retry.
if tryCnt > 0 && !backoffFunc(ctx, tryCnt-1) {
return nil
}
tryCnt++
if ctx.Err() != nil {
return nil
}
rawS, err := newStream()
if err != nil {
continue retryConnection
}
s, ok := rawS.(grpc.ClientStream)
// Ideally, this should never happen. But if it happens, the server is marked as healthy for load-balancing purposes.
if !ok {
reportHealth(true)
return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS)
}
if err = s.SendMsg(&healthpb.HealthCheckRequest{Service: service}); err != nil && err != io.EOF {
// Stream should have been closed, so we can safely continue to create a new stream.
continue retryConnection
}
s.CloseSend()
resp := new(healthpb.HealthCheckResponse)
for {
err = s.RecvMsg(resp)
// Reports healthy for load-balancing purposes if the health check is not implemented in the server.
if status.Code(err) == codes.Unimplemented {
reportHealth(true)
return err
}
// Reports unhealthy if the server's Watch method returns an error other than UNIMPLEMENTED.
if err != nil {
reportHealth(false)
continue retryConnection
}
// A message has been received, so remove the need for backoff on the next retry by resetting the try count.
tryCnt = 0
reportHealth(resp.Status == healthpb.HealthCheckResponse_SERVING)
}
}
}
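
The retry loop above sleeps with exponential backoff but aborts promptly when the context is cancelled. The same timer/ctx.Done select pattern in isolation, with an invented helper name and caller-supplied duration:

package backoffsketch

import (
	"context"
	"time"
)

// sleepCtx waits for d or until ctx is done, reporting whether the full wait
// completed; it mirrors the backoffFunc defined above.
func sleepCtx(ctx context.Context, d time.Duration) bool {
	t := time.NewTimer(d)
	select {
	case <-t.C:
		return true
	case <-ctx.Done():
		t.Stop()
		return false
	}
}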

View File

@ -26,27 +26,30 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type HealthCheckResponse_ServingStatus int32
const (
HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0
HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1
HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2
HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0
HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1
HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2
HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3
)
var HealthCheckResponse_ServingStatus_name = map[int32]string{
0: "UNKNOWN",
1: "SERVING",
2: "NOT_SERVING",
3: "SERVICE_UNKNOWN",
}
var HealthCheckResponse_ServingStatus_value = map[string]int32{
"UNKNOWN": 0,
"SERVING": 1,
"NOT_SERVING": 2,
"UNKNOWN": 0,
"SERVING": 1,
"NOT_SERVING": 2,
"SERVICE_UNKNOWN": 3,
}
func (x HealthCheckResponse_ServingStatus) String() string {
return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x))
}
func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_health_85731b6c49265086, []int{1, 0}
return fileDescriptor_health_6b1a06aa67f91efd, []int{1, 0}
}
type HealthCheckRequest struct {
@ -60,7 +63,7 @@ func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} }
func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) }
func (*HealthCheckRequest) ProtoMessage() {}
func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_health_85731b6c49265086, []int{0}
return fileDescriptor_health_6b1a06aa67f91efd, []int{0}
}
func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b)
@ -98,7 +101,7 @@ func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} }
func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) }
func (*HealthCheckResponse) ProtoMessage() {}
func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_health_85731b6c49265086, []int{1}
return fileDescriptor_health_6b1a06aa67f91efd, []int{1}
}
func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b)
@ -143,7 +146,25 @@ const _ = grpc.SupportPackageIsVersion4
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type HealthClient interface {
// If the requested service is unknown, the call will fail with status
// NOT_FOUND.
Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
// Performs a watch for the serving status of the requested service.
// The server will immediately send back a message indicating the current
// serving status. It will then subsequently send a new message whenever
// the service's serving status changes.
//
// If the requested service is unknown when the call is received, the
// server will send a message setting the serving status to
// SERVICE_UNKNOWN but will *not* terminate the call. If at some
// future point, the serving status of the service becomes known, the
// server will send a new message with the service's serving status.
//
// If the call terminates with status UNIMPLEMENTED, then clients
// should assume this method is not supported and should not retry the
// call. If the call terminates with any other status (including OK),
// clients should retry the call with appropriate exponential backoff.
Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error)
}
type healthClient struct {
@ -163,9 +184,59 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts .
return out, nil
}
func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) {
stream, err := c.cc.NewStream(ctx, &_Health_serviceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...)
if err != nil {
return nil, err
}
x := &healthWatchClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
type Health_WatchClient interface {
Recv() (*HealthCheckResponse, error)
grpc.ClientStream
}
type healthWatchClient struct {
grpc.ClientStream
}
func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) {
m := new(HealthCheckResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// HealthServer is the server API for Health service.
type HealthServer interface {
// If the requested service is unknown, the call will fail with status
// NOT_FOUND.
Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
// Performs a watch for the serving status of the requested service.
// The server will immediately send back a message indicating the current
// serving status. It will then subsequently send a new message whenever
// the service's serving status changes.
//
// If the requested service is unknown when the call is received, the
// server will send a message setting the serving status to
// SERVICE_UNKNOWN but will *not* terminate the call. If at some
// future point, the serving status of the service becomes known, the
// server will send a new message with the service's serving status.
//
// If the call terminates with status UNIMPLEMENTED, then clients
// should assume this method is not supported and should not retry the
// call. If the call terminates with any other status (including OK),
// clients should retry the call with appropriate exponential backoff.
Watch(*HealthCheckRequest, Health_WatchServer) error
}
func RegisterHealthServer(s *grpc.Server, srv HealthServer) {
@ -190,6 +261,27 @@ func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interf
return interceptor(ctx, in, info, handler)
}
func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(HealthCheckRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(HealthServer).Watch(m, &healthWatchServer{stream})
}
type Health_WatchServer interface {
Send(*HealthCheckResponse) error
grpc.ServerStream
}
type healthWatchServer struct {
grpc.ServerStream
}
func (x *healthWatchServer) Send(m *HealthCheckResponse) error {
return x.ServerStream.SendMsg(m)
}
var _Health_serviceDesc = grpc.ServiceDesc{
ServiceName: "grpc.health.v1.Health",
HandlerType: (*HealthServer)(nil),
@ -199,29 +291,37 @@ var _Health_serviceDesc = grpc.ServiceDesc{
Handler: _Health_Check_Handler,
},
},
Streams: []grpc.StreamDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "Watch",
Handler: _Health_Watch_Handler,
ServerStreams: true,
},
},
Metadata: "grpc/health/v1/health.proto",
}
func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_health_85731b6c49265086) }
func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_health_6b1a06aa67f91efd) }
var fileDescriptor_health_85731b6c49265086 = []byte{
// 271 bytes of a gzipped FileDescriptorProto
var fileDescriptor_health_6b1a06aa67f91efd = []byte{
// 297 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48,
0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2,
0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f,
0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82,
0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08,
0xc6, 0x55, 0x9a, 0xc3, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8,
0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8,
0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5,
0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d,
0x50, 0xb2, 0xe2, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f,
0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f,
0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8,
0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x46, 0x51, 0x5c, 0x6c, 0x10, 0x8b, 0x84,
0x02, 0xb8, 0x58, 0xc1, 0x96, 0x09, 0x29, 0xe1, 0x75, 0x09, 0xd8, 0xbf, 0x52, 0xca, 0x44, 0xb8,
0xd6, 0x29, 0x91, 0x4b, 0x30, 0x33, 0x1f, 0x4d, 0xa1, 0x13, 0x37, 0x44, 0x65, 0x00, 0x28, 0x70,
0x03, 0x18, 0xa3, 0x74, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0xd2, 0xf3, 0x73, 0x12, 0xf3,
0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0x91, 0x63, 0x03, 0xc4, 0x8e, 0x87, 0xb0, 0xe3, 0xcb, 0x0c,
0x57, 0x31, 0xf1, 0xb9, 0x83, 0x4c, 0x83, 0x18, 0xa1, 0x17, 0x66, 0x98, 0xc4, 0x06, 0x8e, 0x24,
0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xec, 0x66, 0x81, 0xcb, 0xc3, 0x01, 0x00, 0x00,
0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3,
0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac,
0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10,
0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc,
0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4,
0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74,
0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20,
0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff,
0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00,
}
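
A hedged client-side sketch of the new streaming Watch RPC. It assumes the generated constructor NewHealthClient (standard in protoc-gen-go output for this package), an already-dialed *grpc.ClientConn, and a placeholder service name.

package healthwatchsketch

import (
	"context"
	"log"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func watchHealth(ctx context.Context, conn *grpc.ClientConn) error {
	client := healthpb.NewHealthClient(conn)
	stream, err := client.Watch(ctx, &healthpb.HealthCheckRequest{Service: "my.service"})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv() // the first message carries the current status
		if err != nil {
			return err // e.g. an UNIMPLEMENTED status on servers without Watch
		}
		log.Printf("serving status: %v", resp.Status)
	}
}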

View File

@ -1,72 +0,0 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
//go:generate ./regenerate.sh
// Package health provides some utility functions to health-check a server. The implementation
// is based on protobuf. Users need to write their own implementations if other IDLs are used.
package health
import (
"sync"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/status"
)
// Server implements `service Health`.
type Server struct {
mu sync.Mutex
// statusMap stores the serving status of the services this Server monitors.
statusMap map[string]healthpb.HealthCheckResponse_ServingStatus
}
// NewServer returns a new Server.
func NewServer() *Server {
return &Server{
statusMap: make(map[string]healthpb.HealthCheckResponse_ServingStatus),
}
}
// Check implements `service Health`.
func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
if in.Service == "" {
// check the server overall health status.
return &healthpb.HealthCheckResponse{
Status: healthpb.HealthCheckResponse_SERVING,
}, nil
}
if status, ok := s.statusMap[in.Service]; ok {
return &healthpb.HealthCheckResponse{
Status: status,
}, nil
}
return nil, status.Error(codes.NotFound, "unknown service")
}
// SetServingStatus is called when need to reset the serving status of a service
// or insert a new service entry into the statusMap.
func (s *Server) SetServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) {
s.mu.Lock()
s.statusMap[service] = status
s.mu.Unlock()
}

165
vendor/google.golang.org/grpc/health/server.go generated vendored Normal file
View File

@ -0,0 +1,165 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
//go:generate ./regenerate.sh
// Package health provides a service that exposes the server's health and must be
// imported to enable support for client-side health checks.
package health
import (
"context"
"sync"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
healthgrpc "google.golang.org/grpc/health/grpc_health_v1"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/status"
)
// Server implements `service Health`.
type Server struct {
mu sync.Mutex
// If shutdown is true, every serving status is expected to be NOT_SERVING and
// to stay NOT_SERVING.
shutdown bool
// statusMap stores the serving status of the services this Server monitors.
statusMap map[string]healthpb.HealthCheckResponse_ServingStatus
updates map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus
}
// NewServer returns a new Server.
func NewServer() *Server {
return &Server{
statusMap: map[string]healthpb.HealthCheckResponse_ServingStatus{"": healthpb.HealthCheckResponse_SERVING},
updates: make(map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus),
}
}
// Check implements `service Health`.
func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
if servingStatus, ok := s.statusMap[in.Service]; ok {
return &healthpb.HealthCheckResponse{
Status: servingStatus,
}, nil
}
return nil, status.Error(codes.NotFound, "unknown service")
}
// Watch implements `service Health`.
func (s *Server) Watch(in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error {
service := in.Service
// update channel is used for getting service status updates.
update := make(chan healthpb.HealthCheckResponse_ServingStatus, 1)
s.mu.Lock()
// Puts the initial status to the channel.
if servingStatus, ok := s.statusMap[service]; ok {
update <- servingStatus
} else {
update <- healthpb.HealthCheckResponse_SERVICE_UNKNOWN
}
// Registers the update channel to the correct place in the updates map.
if _, ok := s.updates[service]; !ok {
s.updates[service] = make(map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus)
}
s.updates[service][stream] = update
defer func() {
s.mu.Lock()
delete(s.updates[service], stream)
s.mu.Unlock()
}()
s.mu.Unlock()
var lastSentStatus healthpb.HealthCheckResponse_ServingStatus = -1
for {
select {
// Status updated. Sends the up-to-date status to the client.
case servingStatus := <-update:
if lastSentStatus == servingStatus {
continue
}
lastSentStatus = servingStatus
err := stream.Send(&healthpb.HealthCheckResponse{Status: servingStatus})
if err != nil {
return status.Error(codes.Canceled, "Stream has ended.")
}
// Context done. Removes the update channel from the updates map.
case <-stream.Context().Done():
return status.Error(codes.Canceled, "Stream has ended.")
}
}
}
// SetServingStatus is called when the serving status of a service needs to be
// reset, or when a new service entry is inserted into the statusMap.
func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) {
s.mu.Lock()
defer s.mu.Unlock()
if s.shutdown {
grpclog.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus)
return
}
s.setServingStatusLocked(service, servingStatus)
}
func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) {
s.statusMap[service] = servingStatus
for _, update := range s.updates[service] {
// Clears previous updates, that are not sent to the client, from the channel.
// This can happen if the client is not reading and the server gets flow control limited.
select {
case <-update:
default:
}
// Puts the most recent update to the channel.
update <- servingStatus
}
}
// Shutdown sets all serving status to NOT_SERVING, and configures the server to
// ignore all future status changes.
//
// This changes the serving status for all services. To set the status for a
// particular service, call SetServingStatus().
func (s *Server) Shutdown() {
s.mu.Lock()
defer s.mu.Unlock()
s.shutdown = true
for service := range s.statusMap {
s.setServingStatusLocked(service, healthpb.HealthCheckResponse_NOT_SERVING)
}
}
// Resume sets all serving status to SERVING, and configures the server to
// accept all future status changes.
//
// This changes the serving status for all services. To set the status for a
// particular service, call SetServingStatus().
func (s *Server) Resume() {
s.mu.Lock()
defer s.mu.Unlock()
s.shutdown = false
for service := range s.statusMap {
s.setServingStatusLocked(service, healthpb.HealthCheckResponse_SERVING)
}
}
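
A server-side sketch tying the new health server together: register it on a grpc.Server, mark a service as serving, and rely on Shutdown()/Resume() for coordinated drains. The listener address and service name are placeholders.

package healthserversketch

import (
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func serve() error {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		return err
	}
	s := grpc.NewServer()
	hs := health.NewServer() // overall status ("") starts as SERVING
	healthpb.RegisterHealthServer(s, hs)

	// Per-service status; Watch clients of "my.service" get this update pushed.
	hs.SetServingStatus("my.service", healthpb.HealthCheckResponse_SERVING)

	return s.Serve(lis)
}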

View File

@ -1,6 +1,6 @@
#!/bin/bash
TMP=$(mktemp -d /tmp/sdk.XXX) \
&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.64.zip" \
&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \
&& unzip -q $TMP.zip -d $TMP \
&& export PATH="$PATH:$TMP/go_appengine"

View File

@ -19,7 +19,7 @@
package grpc
import (
"golang.org/x/net/context"
"context"
)
// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.

View File

@ -0,0 +1,167 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package binarylog implements binary logging as defined in
// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md.
package binarylog
import (
"fmt"
"os"
"google.golang.org/grpc/grpclog"
)
// Logger is the global binary logger. It can be used to get binary logger for
// each method.
type Logger interface {
getMethodLogger(methodName string) *MethodLogger
}
// binLogger is the global binary logger for the binary. One of these should be
// built at init time from the configuration (environment variable or flags).
//
// It is used to get a methodLogger for each individual method.
var binLogger Logger
// SetLogger sets the binary logger.
//
// Only call this at init time.
func SetLogger(l Logger) {
binLogger = l
}
// GetMethodLogger returns the methodLogger for the given methodName.
//
// methodName should be in the format of "/service/method".
//
// Each methodLogger returned by this method is a new instance. This is to
// generate sequence id within the call.
func GetMethodLogger(methodName string) *MethodLogger {
if binLogger == nil {
return nil
}
return binLogger.getMethodLogger(methodName)
}
func init() {
const envStr = "GRPC_BINARY_LOG_FILTER"
configStr := os.Getenv(envStr)
binLogger = NewLoggerFromConfigString(configStr)
}
type methodLoggerConfig struct {
// Max length of header and message.
hdr, msg uint64
}
type logger struct {
all *methodLoggerConfig
services map[string]*methodLoggerConfig
methods map[string]*methodLoggerConfig
blacklist map[string]struct{}
}
// newEmptyLogger creates an empty logger. The map fields need to be filled in
// using the set* functions.
func newEmptyLogger() *logger {
return &logger{}
}
// Set method logger for "*".
func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
if l.all != nil {
return fmt.Errorf("conflicting global rules found")
}
l.all = ml
return nil
}
// Set method logger for "service/*".
//
// New methodLogger with same service overrides the old one.
func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
if _, ok := l.services[service]; ok {
return fmt.Errorf("conflicting rules for service %v found", service)
}
if l.services == nil {
l.services = make(map[string]*methodLoggerConfig)
}
l.services[service] = ml
return nil
}
// Set method logger for "service/method".
//
// New methodLogger with same method overrides the old one.
func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
if _, ok := l.blacklist[method]; ok {
return fmt.Errorf("conflicting rules for method %v found", method)
}
if _, ok := l.methods[method]; ok {
return fmt.Errorf("conflicting rules for method %v found", method)
}
if l.methods == nil {
l.methods = make(map[string]*methodLoggerConfig)
}
l.methods[method] = ml
return nil
}
// Set blacklist method for "-service/method".
func (l *logger) setBlacklist(method string) error {
if _, ok := l.blacklist[method]; ok {
return fmt.Errorf("conflicting rules for method %v found", method)
}
if _, ok := l.methods[method]; ok {
return fmt.Errorf("conflicting rules for method %v found", method)
}
if l.blacklist == nil {
l.blacklist = make(map[string]struct{})
}
l.blacklist[method] = struct{}{}
return nil
}
// getMethodLogger returns the methodLogger for the given methodName.
//
// methodName should be in the format of "/service/method".
//
// Each methodLogger returned by this method is a new instance. This is to
// generate sequence id within the call.
func (l *logger) getMethodLogger(methodName string) *MethodLogger {
s, m, err := parseMethodName(methodName)
if err != nil {
grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err)
return nil
}
if ml, ok := l.methods[s+"/"+m]; ok {
return newMethodLogger(ml.hdr, ml.msg)
}
if _, ok := l.blacklist[s+"/"+m]; ok {
return nil
}
if ml, ok := l.services[s]; ok {
return newMethodLogger(ml.hdr, ml.msg)
}
if l.all == nil {
return nil
}
return newMethodLogger(l.all.hdr, l.all.msg)
}
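
In this release the binarylog package sits under the module-internal tree, so the sketch below would only compile from inside the grpc module; it shows installing a logger built from one of the documented filter strings instead of relying on the GRPC_BINARY_LOG_FILTER environment variable read in the init function above. The function name is invented.

package binarylog

// installMessageOnlyLogger logs the first 256 bytes of every message for all
// methods; it mirrors what init does with the env var.
func installMessageOnlyLogger() {
	if l := NewLoggerFromConfigString("*{m:256}"); l != nil {
		SetLogger(l)
	}
}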

View File

@ -0,0 +1,42 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// This file contains exported variables/functions that are exported for testing
// only.
//
// Ideally these would live in a *_test.go file inside the binarylog package,
// but that doesn't work with staticcheck under Go modules. The error was:
// "MdToMetadataProto not declared by package binarylog". This could be caused
// by the way staticcheck looks for files for a certain package, which doesn't
// support *_test.go files.
//
// Move those to binary_test.go when staticcheck is fixed.
package binarylog
var (
// AllLogger is a logger that logs all headers/messages for all RPCs. It's
// for testing only.
AllLogger = NewLoggerFromConfigString("*")
// MdToMetadataProto converts metadata to a binary logging proto message.
// It's for testing only.
MdToMetadataProto = mdToMetadataProto
// AddrToProto converts an address to a binary logging proto message. It's
// for testing only.
AddrToProto = addrToProto
)

View File

@ -0,0 +1,210 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package binarylog
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
"google.golang.org/grpc/grpclog"
)
// NewLoggerFromConfigString reads the string and builds a logger. It can be used
// to build a new logger and assign it to binarylog.Logger.
//
// Example filter config strings:
// - "" Nothing will be logged
// - "*" All headers and messages will be fully logged.
// - "*{h}" Only headers will be logged.
// - "*{m:256}" Only the first 256 bytes of each message will be logged.
// - "Foo/*" Logs every method in service Foo
// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
// /Foo/Bar, logs all headers and messages in every other method in service
// Foo.
//
// If two configs exist for the same method or service, the one specified
// later overrides the previous config.
func NewLoggerFromConfigString(s string) Logger {
if s == "" {
return nil
}
l := newEmptyLogger()
methods := strings.Split(s, ",")
for _, method := range methods {
if err := l.fillMethodLoggerWithConfigString(method); err != nil {
grpclog.Warningf("failed to parse binary log config: %v", err)
return nil
}
}
return l
}
// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds
// it to the right map in the logger.
func (l *logger) fillMethodLoggerWithConfigString(config string) error {
// "" is invalid.
if config == "" {
return errors.New("empty string is not a valid method binary logging config")
}
// "-service/method", blacklist, no * or {} allowed.
if config[0] == '-' {
s, m, suffix, err := parseMethodConfigAndSuffix(config[1:])
if err != nil {
return fmt.Errorf("invalid config: %q, %v", config, err)
}
if m == "*" {
return fmt.Errorf("invalid config: %q, %v", config, "* not allowd in blacklist config")
}
if suffix != "" {
return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config")
}
if err := l.setBlacklist(s + "/" + m); err != nil {
return fmt.Errorf("invalid config: %v", err)
}
return nil
}
// "*{h:256;m:256}"
if config[0] == '*' {
hdr, msg, err := parseHeaderMessageLengthConfig(config[1:])
if err != nil {
return fmt.Errorf("invalid config: %q, %v", config, err)
}
if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
return fmt.Errorf("invalid config: %v", err)
}
return nil
}
s, m, suffix, err := parseMethodConfigAndSuffix(config)
if err != nil {
return fmt.Errorf("invalid config: %q, %v", config, err)
}
hdr, msg, err := parseHeaderMessageLengthConfig(suffix)
if err != nil {
return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
}
if m == "*" {
if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
return fmt.Errorf("invalid config: %v", err)
}
} else {
if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
return fmt.Errorf("invalid config: %v", err)
}
}
return nil
}
const (
// TODO: this const is only used by env_config now. But could be useful for
// other config. Move to binarylog.go if necessary.
maxUInt = ^uint64(0)
// For "p.s/m" plus any suffix. Suffix will be parsed again. See test for
// expected output.
longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$`
// For suffix from above, "{h:123;m:123}". See test for expected output.
optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123".
headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$`
messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$`
headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$`
)
var (
longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr)
headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr)
messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr)
headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr)
)
// Turn "service/method{h;m}" into "service", "method", "{h;m}".
func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) {
// Regexp result:
//
// in: "p.s/m{h:123,m:123}",
// out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"},
match := longMethodConfigRegexp.FindStringSubmatch(c)
if match == nil {
return "", "", "", fmt.Errorf("%q contains invalid substring", c)
}
service = match[1]
method = match[2]
suffix = match[3]
return
}
// Turn "{h:123;m:345}" into 123, 345.
//
// Return maxUInt if length is unspecified.
func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) {
if c == "" {
return maxUInt, maxUInt, nil
}
// Header config only.
if match := headerConfigRegexp.FindStringSubmatch(c); match != nil {
if s := match[1]; s != "" {
hdrLenStr, err = strconv.ParseUint(s, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
}
return hdrLenStr, 0, nil
}
return maxUInt, 0, nil
}
// Message config only.
if match := messageConfigRegexp.FindStringSubmatch(c); match != nil {
if s := match[1]; s != "" {
msgLenStr, err = strconv.ParseUint(s, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("Failed to convert %q to uint", s)
}
return 0, msgLenStr, nil
}
return 0, maxUInt, nil
}
// Header and message config both.
if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil {
// Both hdr and msg are specified, but one or two of them might be empty.
hdrLenStr = maxUInt
msgLenStr = maxUInt
if s := match[1]; s != "" {
hdrLenStr, err = strconv.ParseUint(s, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("Failed to convert %q to uint", s)
}
}
if s := match[2]; s != "" {
msgLenStr, err = strconv.ParseUint(s, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("Failed to convert %q to uint", s)
}
}
return hdrLenStr, msgLenStr, nil
}
return 0, 0, fmt.Errorf("%q contains invalid substring", c)
}
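// exampleParseConfigEntry is an illustrative sketch (not part of the upstream
// gRPC source) that walks a single config entry through the two helpers above.
func exampleParseConfigEntry() (uint64, uint64, error) {
	// "p.s/m{h:64;m:128}" -> service "p.s", method "m", suffix "{h:64;m:128}".
	_, _, suffix, err := parseMethodConfigAndSuffix("p.s/m{h:64;m:128}")
	if err != nil {
		return 0, 0, err
	}
	// The suffix yields a 64-byte header limit and a 128-byte message limit;
	// an absent limit (e.g. "{h}") comes back as maxUInt.
	return parseHeaderMessageLengthConfig(suffix)
}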

View File

@ -0,0 +1,426 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package binarylog
import (
"net"
"strings"
"sync/atomic"
"time"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
type callIDGenerator struct {
id uint64
}
func (g *callIDGenerator) next() uint64 {
id := atomic.AddUint64(&g.id, 1)
return id
}
// reset is for testing only, and doesn't need to be thread safe.
func (g *callIDGenerator) reset() {
g.id = 0
}
var idGen callIDGenerator
// MethodLogger is the sub-logger for each method.
type MethodLogger struct {
headerMaxLen, messageMaxLen uint64
callID uint64
idWithinCallGen *callIDGenerator
sink Sink // TODO(blog): make this pluggable.
}
func newMethodLogger(h, m uint64) *MethodLogger {
return &MethodLogger{
headerMaxLen: h,
messageMaxLen: m,
callID: idGen.next(),
idWithinCallGen: &callIDGenerator{},
sink: defaultSink, // TODO(blog): make it pluggable.
}
}
// Log creates a proto binary log entry, and logs it to the sink.
func (ml *MethodLogger) Log(c LogEntryConfig) {
m := c.toProto()
timestamp, _ := ptypes.TimestampProto(time.Now())
m.Timestamp = timestamp
m.CallId = ml.callID
m.SequenceIdWithinCall = ml.idWithinCallGen.next()
switch pay := m.Payload.(type) {
case *pb.GrpcLogEntry_ClientHeader:
m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
case *pb.GrpcLogEntry_ServerHeader:
m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
case *pb.GrpcLogEntry_Message:
m.PayloadTruncated = ml.truncateMessage(pay.Message)
}
ml.sink.Write(m)
}
func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
if ml.headerMaxLen == maxUInt {
return false
}
var (
bytesLimit = ml.headerMaxLen
index int
)
// At the end of the loop, index will be the first entry where the total
// size is greater than the limit:
//
// len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr.
for ; index < len(mdPb.Entry); index++ {
entry := mdPb.Entry[index]
if entry.Key == "grpc-trace-bin" {
// "grpc-trace-bin" is a special key. It's kept in the log entry,
// but not counted towards the size limit.
continue
}
currentEntryLen := uint64(len(entry.Value))
if currentEntryLen > bytesLimit {
break
}
bytesLimit -= currentEntryLen
}
truncated = index < len(mdPb.Entry)
mdPb.Entry = mdPb.Entry[:index]
return truncated
}
func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
if ml.messageMaxLen == maxUInt {
return false
}
if ml.messageMaxLen >= uint64(len(msgPb.Data)) {
return false
}
msgPb.Data = msgPb.Data[:ml.messageMaxLen]
return true
}
// LogEntryConfig represents the configuration for binary log entry.
type LogEntryConfig interface {
toProto() *pb.GrpcLogEntry
}
// ClientHeader configures the binary log entry to be a ClientHeader entry.
type ClientHeader struct {
OnClientSide bool
Header metadata.MD
MethodName string
Authority string
Timeout time.Duration
// PeerAddr is required only when it's on server side.
PeerAddr net.Addr
}
func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
// This function doesn't need to set all the fields (e.g. seq ID). The Log
// function will set the fields when necessary.
clientHeader := &pb.ClientHeader{
Metadata: mdToMetadataProto(c.Header),
MethodName: c.MethodName,
Authority: c.Authority,
}
if c.Timeout > 0 {
clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
}
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
Payload: &pb.GrpcLogEntry_ClientHeader{
ClientHeader: clientHeader,
},
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
}
if c.PeerAddr != nil {
ret.Peer = addrToProto(c.PeerAddr)
}
return ret
}
// ServerHeader configures the binary log entry to be a ServerHeader entry.
type ServerHeader struct {
OnClientSide bool
Header metadata.MD
// PeerAddr is required only when it's on client side.
PeerAddr net.Addr
}
func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
Payload: &pb.GrpcLogEntry_ServerHeader{
ServerHeader: &pb.ServerHeader{
Metadata: mdToMetadataProto(c.Header),
},
},
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
}
if c.PeerAddr != nil {
ret.Peer = addrToProto(c.PeerAddr)
}
return ret
}
// ClientMessage configures the binary log entry to be a ClientMessage entry.
type ClientMessage struct {
OnClientSide bool
// Message can be a proto.Message or []byte. Other message formats are not
// supported.
Message interface{}
}
func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
var (
data []byte
err error
)
if m, ok := c.Message.(proto.Message); ok {
data, err = proto.Marshal(m)
if err != nil {
grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
}
} else if b, ok := c.Message.([]byte); ok {
data = b
} else {
grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
}
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
Payload: &pb.GrpcLogEntry_Message{
Message: &pb.Message{
Length: uint32(len(data)),
Data: data,
},
},
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
}
return ret
}
// ServerMessage configures the binary log entry to be a ServerMessage entry.
type ServerMessage struct {
OnClientSide bool
// Message can be a proto.Message or []byte. Other message formats are not
// supported.
Message interface{}
}
func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
var (
data []byte
err error
)
if m, ok := c.Message.(proto.Message); ok {
data, err = proto.Marshal(m)
if err != nil {
grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
}
} else if b, ok := c.Message.([]byte); ok {
data = b
} else {
grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
}
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
Payload: &pb.GrpcLogEntry_Message{
Message: &pb.Message{
Length: uint32(len(data)),
Data: data,
},
},
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
}
return ret
}
// ClientHalfClose configures the binary log entry to be a ClientHalfClose entry.
type ClientHalfClose struct {
OnClientSide bool
}
func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
Payload: nil, // No payload here.
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
}
return ret
}
// ServerTrailer configures the binary log entry to be a ServerTrailer entry.
type ServerTrailer struct {
OnClientSide bool
Trailer metadata.MD
// Err is the status error.
Err error
// PeerAddr is required only when it's on client side and the RPC is trailer
// only.
PeerAddr net.Addr
}
func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
st, ok := status.FromError(c.Err)
if !ok {
grpclog.Info("binarylogging: error in trailer is not a status error")
}
var (
detailsBytes []byte
err error
)
stProto := st.Proto()
if stProto != nil && len(stProto.Details) != 0 {
detailsBytes, err = proto.Marshal(stProto)
if err != nil {
grpclog.Infof("binarylogging: failed to marshal status proto: %v", err)
}
}
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
Payload: &pb.GrpcLogEntry_Trailer{
Trailer: &pb.Trailer{
Metadata: mdToMetadataProto(c.Trailer),
StatusCode: uint32(st.Code()),
StatusMessage: st.Message(),
StatusDetails: detailsBytes,
},
},
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
}
if c.PeerAddr != nil {
ret.Peer = addrToProto(c.PeerAddr)
}
return ret
}
// Cancel configures the binary log entry to be a Cancel entry.
type Cancel struct {
OnClientSide bool
}
func (c *Cancel) toProto() *pb.GrpcLogEntry {
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
Payload: nil,
}
if c.OnClientSide {
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
} else {
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
}
return ret
}
// metadataKeyOmit returns whether the metadata entry with this key should be
// omitted.
func metadataKeyOmit(key string) bool {
switch key {
case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
return true
case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users.
return false
}
if strings.HasPrefix(key, "grpc-") {
return true
}
return false
}
func mdToMetadataProto(md metadata.MD) *pb.Metadata {
ret := &pb.Metadata{}
for k, vv := range md {
if metadataKeyOmit(k) {
continue
}
for _, v := range vv {
ret.Entry = append(ret.Entry,
&pb.MetadataEntry{
Key: k,
Value: []byte(v),
},
)
}
}
return ret
}
func addrToProto(addr net.Addr) *pb.Address {
ret := &pb.Address{}
switch a := addr.(type) {
case *net.TCPAddr:
if a.IP.To4() != nil {
ret.Type = pb.Address_TYPE_IPV4
} else if a.IP.To16() != nil {
ret.Type = pb.Address_TYPE_IPV6
} else {
ret.Type = pb.Address_TYPE_UNKNOWN
// Do not set address and port fields.
break
}
ret.Address = a.IP.String()
ret.IpPort = uint32(a.Port)
case *net.UnixAddr:
ret.Type = pb.Address_TYPE_UNIX
ret.Address = a.String()
default:
ret.Type = pb.Address_TYPE_UNKNOWN
}
return ret
}
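// exampleLogClientHeader is an illustrative sketch (not part of the upstream
// gRPC source): it logs a client-side ClientHeader entry through a method
// logger configured with a 1 KiB header limit and an unlimited message size.
// The method name and authority below are placeholder values.
func exampleLogClientHeader(md metadata.MD) {
	ml := newMethodLogger(1024, maxUInt)
	ml.Log(&ClientHeader{
		OnClientSide: true,
		Header:       md,
		MethodName:   "/service/method",
		Authority:    "example.com",
	})
}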

View File

@ -0,0 +1,33 @@
#!/bin/bash
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eux -o pipefail
TMP=$(mktemp -d)
function finish {
rm -rf "$TMP"
}
trap finish EXIT
pushd "$TMP"
mkdir -p grpc/binarylog/grpc_binarylog_v1
curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto
protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto
popd
rm -f ./grpc_binarylog_v1/*.pb.go
cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/

View File

@ -0,0 +1,162 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package binarylog
import (
"bufio"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"sync"
"time"
"github.com/golang/protobuf/proto"
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
"google.golang.org/grpc/grpclog"
)
var (
defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
)
// SetDefaultSink sets the sink where binary logs will be written.
//
// Not thread safe. Only set during initialization.
func SetDefaultSink(s Sink) {
if defaultSink != nil {
defaultSink.Close()
}
defaultSink = s
}
// Sink writes log entries into the binary log sink.
type Sink interface {
// Write will be called to write the log entry into the sink.
//
// It should be thread-safe so it can be called in parallel.
Write(*pb.GrpcLogEntry) error
// Close will be called when the Sink is replaced by a new Sink.
Close() error
}
type noopSink struct{}
func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil }
func (ns *noopSink) Close() error { return nil }
// newWriterSink creates a binary log sink with the given writer.
//
// Write() marshals the proto message and writes it to the given writer. Each
// message is prefixed with a 4-byte big-endian unsigned integer as its length.
//
// No buffering is done; Close() doesn't try to close the writer.
func newWriterSink(w io.Writer) *writerSink {
return &writerSink{out: w}
}
type writerSink struct {
out io.Writer
}
func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
b, err := proto.Marshal(e)
if err != nil {
grpclog.Infof("binary logging: failed to marshal proto message: %v", err)
}
hdr := make([]byte, 4)
binary.BigEndian.PutUint32(hdr, uint32(len(b)))
if _, err := ws.out.Write(hdr); err != nil {
return err
}
if _, err := ws.out.Write(b); err != nil {
return err
}
return nil
}
func (ws *writerSink) Close() error { return nil }
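// exampleReadEntry is an illustrative sketch (not part of the upstream gRPC
// source): it reads back one entry in the framing produced by writerSink, a
// 4-byte big-endian length prefix followed by the marshaled GrpcLogEntry.
func exampleReadEntry(r io.Reader) (*pb.GrpcLogEntry, error) {
	hdr := make([]byte, 4)
	if _, err := io.ReadFull(r, hdr); err != nil {
		return nil, err
	}
	b := make([]byte, binary.BigEndian.Uint32(hdr))
	if _, err := io.ReadFull(r, b); err != nil {
		return nil, err
	}
	entry := &pb.GrpcLogEntry{}
	if err := proto.Unmarshal(b, entry); err != nil {
		return nil, err
	}
	return entry, nil
}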
type bufWriteCloserSink struct {
mu sync.Mutex
closer io.Closer
out *writerSink // out is built on buf.
buf *bufio.Writer // buf is kept for flush.
writeStartOnce sync.Once
writeTicker *time.Ticker
}
func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error {
// Start the write loop when Write is called.
fs.writeStartOnce.Do(fs.startFlushGoroutine)
fs.mu.Lock()
if err := fs.out.Write(e); err != nil {
fs.mu.Unlock()
return err
}
fs.mu.Unlock()
return nil
}
const (
bufFlushDuration = 60 * time.Second
)
func (fs *bufWriteCloserSink) startFlushGoroutine() {
fs.writeTicker = time.NewTicker(bufFlushDuration)
go func() {
for range fs.writeTicker.C {
fs.mu.Lock()
fs.buf.Flush()
fs.mu.Unlock()
}
}()
}
func (fs *bufWriteCloserSink) Close() error {
if fs.writeTicker != nil {
fs.writeTicker.Stop()
}
fs.mu.Lock()
fs.buf.Flush()
fs.closer.Close()
fs.out.Close()
fs.mu.Unlock()
return nil
}
func newBufWriteCloserSink(o io.WriteCloser) Sink {
bufW := bufio.NewWriter(o)
return &bufWriteCloserSink{
closer: o,
out: newWriterSink(bufW),
buf: bufW,
}
}
// NewTempFileSink creates a temp file and returns a Sink that writes to this
// file.
func NewTempFileSink() (Sink, error) {
tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt")
if err != nil {
return nil, fmt.Errorf("failed to create temp file: %v", err)
}
return newBufWriteCloserSink(tempFile), nil
}
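// exampleInstallTempFileSink is an illustrative sketch (not part of the
// upstream gRPC source): create the temp-file sink and install it as the
// default sink during initialization.
func exampleInstallTempFileSink() error {
	s, err := NewTempFileSink()
	if err != nil {
		return err
	}
	SetDefaultSink(s) // closes and replaces the previous default sink
	return nil
}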

View File

@ -1,5 +1,3 @@
// +build go1.9,!appengine
/*
*
* Copyright 2018 gRPC authors.
@ -18,18 +16,26 @@
*
*/
package credentials
package binarylog
import (
"errors"
"syscall"
"strings"
)
// implements the syscall.Conn interface
func (c tlsConn) SyscallConn() (syscall.RawConn, error) {
conn, ok := c.rawConn.(syscall.Conn)
if !ok {
return nil, errors.New("RawConn does not implement syscall.Conn")
// parseMethodName splits service and method from the input. It expects format
// "/service/method".
//
// TODO: move to internal/grpcutil.
func parseMethodName(methodName string) (service, method string, _ error) {
if !strings.HasPrefix(methodName, "/") {
return "", "", errors.New("invalid method name: should start with /")
}
return conn.SyscallConn()
methodName = methodName[1:]
pos := strings.LastIndex(methodName, "/")
if pos < 0 {
return "", "", errors.New("invalid method name: suffix /method is missing")
}
return methodName[:pos], methodName[pos+1:], nil
}
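// exampleSplitMethod is an illustrative sketch (not part of the upstream gRPC
// source) showing the "/service/method" format that parseMethodName expects;
// the service and method names are placeholder values.
func exampleSplitMethod() (service, method string, err error) {
	// Splits into service "grpc.testing.TestService" and method "UnaryCall".
	return parseMethodName("/grpc.testing.TestService/UnaryCall")
}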

View File

@ -27,16 +27,22 @@ import (
"sort"
"sync"
"sync/atomic"
"time"
"google.golang.org/grpc/grpclog"
)
const (
defaultMaxTraceEntry int32 = 30
)
var (
db dbWrapper
idGen idGenerator
// EntryPerPage defines the number of channelz entries to be shown on a web page.
EntryPerPage = 50
curState int32
EntryPerPage = int64(50)
curState int32
maxTraceEntry = defaultMaxTraceEntry
)
// TurnOn turns on channelz data collection.
@ -52,6 +58,22 @@ func IsOn() bool {
return atomic.CompareAndSwapInt32(&curState, 1, 1)
}
// SetMaxTraceEntry sets the maximum number of trace entries per entity (i.e. channel/subchannel).
// Setting it to 0 will disable channel tracing.
func SetMaxTraceEntry(i int32) {
atomic.StoreInt32(&maxTraceEntry, i)
}
// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per entity to the default.
func ResetMaxTraceEntryToDefault() {
atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
}
func getMaxTraceEntry() int {
i := atomic.LoadInt32(&maxTraceEntry)
return int(i)
}
// dbWrapper wraps around a reference to internal channelz data storage, and
// provides synchronized functionality to set and get the reference.
type dbWrapper struct {
@ -91,20 +113,20 @@ func NewChannelzStorage() {
// boolean indicating whether there's more top channels to be queried for.
//
// The arg id specifies that only top channel with id at or above it will be included
// in the result. The returned slice is up to a length of EntryPerPage, and is
// sorted in ascending id order.
func GetTopChannels(id int64) ([]*ChannelMetric, bool) {
return db.get().GetTopChannels(id)
// in the result. The returned slice is up to a length of the arg maxResults or
// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
return db.get().GetTopChannels(id, maxResults)
}
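// examplePageTopChannels is an illustrative sketch (not part of the upstream
// gRPC source): page through all top channels with the (startID, maxResults)
// pair; here the returned bool is treated as meaning the listing is exhausted.
func examplePageTopChannels() []*ChannelMetric {
	var all []*ChannelMetric
	id := int64(0)
	for {
		page, end := GetTopChannels(id, 0) // 0 falls back to EntryPerPage
		all = append(all, page...)
		if end || len(page) == 0 {
			break
		}
		id = page[len(page)-1].ID + 1 // next page starts after the last id seen
	}
	return all
}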
// GetServers returns a slice of server's ServerMetric, along with a
// boolean indicating whether there's more servers to be queried for.
//
// The arg id specifies that only server with id at or above it will be included
// in the result. The returned slice is up to a length of EntryPerPage, and is
// sorted in ascending id order.
func GetServers(id int64) ([]*ServerMetric, bool) {
return db.get().GetServers(id)
// in the result. The returned slice is up to a length of the arg maxResults or
// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) {
return db.get().GetServers(id, maxResults)
}
// GetServerSockets returns a slice of server's (identified by id) normal socket's
@ -112,10 +134,10 @@ func GetServers(id int64) ([]*ServerMetric, bool) {
// be queried for.
//
// The arg startID specifies that only sockets with id at or above it will be
// included in the result. The returned slice is up to a length of EntryPerPage,
// and is sorted in ascending id order.
func GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
return db.get().GetServerSockets(id, startID)
// included in the result. The returned slice is up to a length of the arg maxResults
// or EntryPerPage if maxResults is zero, and is sorted in ascending id order.
func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
return db.get().GetServerSockets(id, startID, maxResults)
}
// GetChannel returns the ChannelMetric for the channel (identified by id).
@ -133,6 +155,11 @@ func GetSocket(id int64) *SocketMetric {
return db.get().GetSocket(id)
}
// GetServer returns the ServerMetric for the server (identified by id).
func GetServer(id int64) *ServerMetric {
return db.get().GetServer(id)
}
// RegisterChannel registers the given channel c in channelz database with ref
// as its reference name, and add it to the child list of its parent (identified
// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
@ -146,6 +173,7 @@ func RegisterChannel(c Channel, pid int64, ref string) int64 {
nestedChans: make(map[int64]string),
id: id,
pid: pid,
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
}
if pid == 0 {
db.get().addChannel(id, cn, true, pid, ref)
@ -170,6 +198,7 @@ func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
sockets: make(map[int64]string),
id: id,
pid: pid,
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
}
db.get().addSubChannel(id, sc, pid, ref)
return id
@ -226,6 +255,24 @@ func RemoveEntry(id int64) {
db.get().removeEntry(id)
}
// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added
// to the channel trace.
// The Parent field is optional. It is used for events that will also be recorded
// in the entity's parent trace.
type TraceEventDesc struct {
Desc string
Severity Severity
Parent *TraceEventDesc
}
// AddTraceEvent adds a trace event related to the entity with the specified id, using the provided TraceEventDesc.
func AddTraceEvent(id int64, desc *TraceEventDesc) {
if getMaxTraceEntry() == 0 {
return
}
db.get().traceEvent(id, desc)
}
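// exampleAddTraceEvent is an illustrative sketch (not part of the upstream
// gRPC source): record an info-level event on a subchannel and mirror it into
// its parent's trace; the descriptions are placeholder values.
func exampleAddTraceEvent(subChanID int64) {
	AddTraceEvent(subChanID, &TraceEventDesc{
		Desc:     "Subchannel picked a new address",
		Severity: CtINFO,
		Parent: &TraceEventDesc{
			Desc:     "Child subchannel updated",
			Severity: CtINFO,
		},
	})
}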
// channelMap is the storage data structure for channelz.
// Methods of channelMap can be divided into two categories with respect to locking.
// 1. Methods acquire the global lock.
@ -251,6 +298,7 @@ func (c *channelMap) addServer(id int64, s *server) {
func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
c.mu.Lock()
cn.cm = c
cn.trace.cm = c
c.channels[id] = cn
if isTopChannel {
c.topLevelChannels[id] = struct{}{}
@ -263,6 +311,7 @@ func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid in
func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
c.mu.Lock()
sc.cm = c
sc.trace.cm = c
c.subChannels[id] = sc
c.findEntry(pid).addChild(id, sc)
c.mu.Unlock()
@ -284,16 +333,25 @@ func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref
c.mu.Unlock()
}
// removeEntry triggers the removal of an entry, which may not indeed delete the
// entry, if it has to wait on the deletion of its children, or may lead to a chain
// of entry deletion. For example, deleting the last socket of a gracefully shutting
// down server will lead to the server being also deleted.
// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to
// wait on the deletion of its children and until no other entity's channel trace references it.
// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully
// shutting down server will lead to the server being also deleted.
func (c *channelMap) removeEntry(id int64) {
c.mu.Lock()
c.findEntry(id).triggerDelete()
c.mu.Unlock()
}
// c.mu must be held by the caller
func (c *channelMap) decrTraceRefCount(id int64) {
e := c.findEntry(id)
if v, ok := e.(tracedChannel); ok {
v.decrTraceRefCount()
e.deleteSelfIfReady()
}
}
// c.mu must be held by the caller.
func (c *channelMap) findEntry(id int64) entry {
var v entry
@ -347,6 +405,39 @@ func (c *channelMap) deleteEntry(id int64) {
}
}
func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) {
c.mu.Lock()
child := c.findEntry(id)
childTC, ok := child.(tracedChannel)
if !ok {
c.mu.Unlock()
return
}
childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
if desc.Parent != nil {
parent := c.findEntry(child.getParentID())
var chanType RefChannelType
switch child.(type) {
case *channel:
chanType = RefChannel
case *subChannel:
chanType = RefSubChannel
}
if parentTC, ok := parent.(tracedChannel); ok {
parentTC.getChannelTrace().append(&TraceEvent{
Desc: desc.Parent.Desc,
Severity: desc.Parent.Severity,
Timestamp: time.Now(),
RefID: id,
RefName: childTC.getRefName(),
RefType: chanType,
})
childTC.incrTraceRefCount()
}
}
c.mu.Unlock()
}
type int64Slice []int64
func (s int64Slice) Len() int { return len(s) }
@ -361,29 +452,32 @@ func copyMap(m map[int64]string) map[int64]string {
return n
}
func min(a, b int) int {
func min(a, b int64) int64 {
if a < b {
return a
}
return b
}
func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) {
func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
if maxResults <= 0 {
maxResults = EntryPerPage
}
c.mu.RLock()
l := len(c.topLevelChannels)
l := int64(len(c.topLevelChannels))
ids := make([]int64, 0, l)
cns := make([]*channel, 0, min(l, EntryPerPage))
cns := make([]*channel, 0, min(l, maxResults))
for k := range c.topLevelChannels {
ids = append(ids, k)
}
sort.Sort(int64Slice(ids))
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
count := 0
count := int64(0)
var end bool
var t []*ChannelMetric
for i, v := range ids[idx:] {
if count == EntryPerPage {
if count == maxResults {
break
}
if cn, ok := c.channels[v]; ok {
@ -408,25 +502,29 @@ func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) {
t[i].ChannelData = cn.c.ChannelzMetric()
t[i].ID = cn.id
t[i].RefName = cn.refName
t[i].Trace = cn.trace.dumpData()
}
return t, end
}
func (c *channelMap) GetServers(id int64) ([]*ServerMetric, bool) {
func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) {
if maxResults <= 0 {
maxResults = EntryPerPage
}
c.mu.RLock()
l := len(c.servers)
l := int64(len(c.servers))
ids := make([]int64, 0, l)
ss := make([]*server, 0, min(l, EntryPerPage))
ss := make([]*server, 0, min(l, maxResults))
for k := range c.servers {
ids = append(ids, k)
}
sort.Sort(int64Slice(ids))
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
count := 0
count := int64(0)
var end bool
var s []*ServerMetric
for i, v := range ids[idx:] {
if count == EntryPerPage {
if count == maxResults {
break
}
if svr, ok := c.servers[v]; ok {
@ -454,7 +552,10 @@ func (c *channelMap) GetServers(id int64) ([]*ServerMetric, bool) {
return s, end
}
func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
if maxResults <= 0 {
maxResults = EntryPerPage
}
var svr *server
var ok bool
c.mu.RLock()
@ -464,18 +565,18 @@ func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric,
return nil, true
}
svrskts := svr.sockets
l := len(svrskts)
l := int64(len(svrskts))
ids := make([]int64, 0, l)
sks := make([]*normalSocket, 0, min(l, EntryPerPage))
sks := make([]*normalSocket, 0, min(l, maxResults))
for k := range svrskts {
ids = append(ids, k)
}
sort.Sort((int64Slice(ids)))
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
count := 0
sort.Sort(int64Slice(ids))
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
count := int64(0)
var end bool
for i, v := range ids[idx:] {
if count == EntryPerPage {
if count == maxResults {
break
}
if ns, ok := c.normalSockets[v]; ok {
@ -514,10 +615,14 @@ func (c *channelMap) GetChannel(id int64) *ChannelMetric {
}
cm.NestedChans = copyMap(cn.nestedChans)
cm.SubChans = copyMap(cn.subChans)
// cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when
// holding the lock to prevent potential data race.
chanCopy := cn.c
c.mu.RUnlock()
cm.ChannelData = cn.c.ChannelzMetric()
cm.ChannelData = chanCopy.ChannelzMetric()
cm.ID = cn.id
cm.RefName = cn.refName
cm.Trace = cn.trace.dumpData()
return cm
}
@ -532,10 +637,14 @@ func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
return nil
}
cm.Sockets = copyMap(sc.sockets)
// sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when
// holding the lock to prevent potential data race.
chanCopy := sc.c
c.mu.RUnlock()
cm.ChannelData = sc.c.ChannelzMetric()
cm.ChannelData = chanCopy.ChannelzMetric()
cm.ID = sc.id
cm.RefName = sc.refName
cm.Trace = sc.trace.dumpData()
return cm
}
@ -560,6 +669,23 @@ func (c *channelMap) GetSocket(id int64) *SocketMetric {
return nil
}
func (c *channelMap) GetServer(id int64) *ServerMetric {
sm := &ServerMetric{}
var svr *server
var ok bool
c.mu.RLock()
if svr, ok = c.servers[id]; !ok {
c.mu.RUnlock()
return nil
}
sm.ListenSockets = copyMap(svr.listenSockets)
c.mu.RUnlock()
sm.ID = svr.id
sm.RefName = svr.refName
sm.ServerData = svr.s.ChannelzMetric()
return sm
}
type idGenerator struct {
id int64
}

View File

@ -20,6 +20,8 @@ package channelz
import (
"net"
"sync"
"sync/atomic"
"time"
"google.golang.org/grpc/connectivity"
@ -40,6 +42,8 @@ type entry interface {
// deleteSelfIfReady check whether triggerDelete() has been called before, and whether child
// list is now empty. If both conditions are met, then delete self from database.
deleteSelfIfReady()
// getParentID returns parent ID of the entry. 0 value parent ID means no parent.
getParentID() int64
}
// dummyEntry is a fake entry to handle entry not found case.
@ -73,6 +77,10 @@ func (*dummyEntry) deleteSelfIfReady() {
// code should not reach here. deleteSelfIfReady is always called on an existing entry.
}
func (*dummyEntry) getParentID() int64 {
return 0
}
// ChannelMetric defines the info channelz provides for a specific Channel, which
// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
// child list, etc.
@ -95,6 +103,8 @@ type ChannelMetric struct {
// Note current grpc implementation doesn't allow channel having sockets directly,
// therefore, this field is unused.
Sockets map[int64]string
// Trace contains the most recent traced events.
Trace *ChannelTrace
}
// SubChannelMetric defines the info channelz provides for a specific SubChannel,
@ -121,6 +131,8 @@ type SubChannelMetric struct {
// Sockets tracks the socket type children of this subchannel in the format of a map
// from socket channelz id to corresponding reference string.
Sockets map[int64]string
// Trace contains the most recent traced events.
Trace *ChannelTrace
}
// ChannelInternalMetric defines the struct that the implementor of Channel interface
@ -138,7 +150,35 @@ type ChannelInternalMetric struct {
CallsFailed int64
// The last time a call was started on the channel.
LastCallStartedTimestamp time.Time
//TODO: trace
}
// ChannelTrace stores traced events on a channel/subchannel and related info.
type ChannelTrace struct {
// EventNum is the number of events that ever got traced (i.e. including those that have been deleted)
EventNum int64
// CreationTime is the creation time of the trace.
CreationTime time.Time
// Events stores the most recent trace events (up to $maxTraceEntry; newer events
// will overwrite the oldest ones).
Events []*TraceEvent
}
// TraceEvent represents a single trace event.
type TraceEvent struct {
// Desc is a simple description of the trace event.
Desc string
// Severity states the severity of this trace event.
Severity Severity
// Timestamp is the event time.
Timestamp time.Time
// RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
// involved in this event.
// e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
RefID int64
// RefName is the reference name for the entity that gets referenced in the event.
RefName string
// RefType indicates the referenced entity type, i.e. Channel or SubChannel.
RefType RefChannelType
}
// Channel is the interface that should be satisfied in order to be tracked by
@ -147,6 +187,12 @@ type Channel interface {
ChannelzMetric() *ChannelInternalMetric
}
type dummyChannel struct{}
func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric {
return &ChannelInternalMetric{}
}
type channel struct {
refName string
c Channel
@ -156,6 +202,10 @@ type channel struct {
id int64
pid int64
cm *channelMap
trace *channelTrace
// traceRefCount is the number of trace events that reference this channel.
// Non-zero traceRefCount means the trace of this channel cannot be deleted.
traceRefCount int32
}
func (c *channel) addChild(id int64, e entry) {
@ -180,25 +230,96 @@ func (c *channel) triggerDelete() {
c.deleteSelfIfReady()
}
func (c *channel) deleteSelfIfReady() {
func (c *channel) getParentID() int64 {
return c.pid
}
// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
// deleting the channel reference from its parent's child list.
//
// In order for a channel to be deleted from the tree, it must meet the criteria that removal of the
// corresponding grpc object has been invoked, and the channel does not have any children left.
//
// The returned boolean value indicates whether the channel has been successfully deleted from the tree.
func (c *channel) deleteSelfFromTree() (deleted bool) {
if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
return
return false
}
c.cm.deleteEntry(c.id)
// not top channel
if c.pid != 0 {
c.cm.findEntry(c.pid).deleteChild(c.id)
}
return true
}
// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
// channel, and its memory will be garbage collected.
//
// The trace reference count of the channel must be 0 in order to be deleted from the map. This is
// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
// the trace of the referenced entity must not be deleted. In order to release the resource allocated
// by grpc, the reference to the grpc object is reset to a dummy object.
//
// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
//
// It returns a bool to indicate whether the channel can be safely deleted from map.
func (c *channel) deleteSelfFromMap() (delete bool) {
if c.getTraceRefCount() != 0 {
c.c = &dummyChannel{}
return false
}
return true
}
// deleteSelfIfReady tries to delete the channel itself from the channelz database.
// The delete process includes two steps:
// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
// parent's child list.
// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
// will return entry not found error.
func (c *channel) deleteSelfIfReady() {
if !c.deleteSelfFromTree() {
return
}
if !c.deleteSelfFromMap() {
return
}
c.cm.deleteEntry(c.id)
c.trace.clear()
}
func (c *channel) getChannelTrace() *channelTrace {
return c.trace
}
func (c *channel) incrTraceRefCount() {
atomic.AddInt32(&c.traceRefCount, 1)
}
func (c *channel) decrTraceRefCount() {
atomic.AddInt32(&c.traceRefCount, -1)
}
func (c *channel) getTraceRefCount() int {
i := atomic.LoadInt32(&c.traceRefCount)
return int(i)
}
func (c *channel) getRefName() string {
return c.refName
}
type subChannel struct {
refName string
c Channel
closeCalled bool
sockets map[int64]string
id int64
pid int64
cm *channelMap
refName string
c Channel
closeCalled bool
sockets map[int64]string
id int64
pid int64
cm *channelMap
trace *channelTrace
traceRefCount int32
}
func (sc *subChannel) addChild(id int64, e entry) {
@ -219,12 +340,82 @@ func (sc *subChannel) triggerDelete() {
sc.deleteSelfIfReady()
}
func (sc *subChannel) deleteSelfIfReady() {
func (sc *subChannel) getParentID() int64 {
return sc.pid
}
// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
// means deleting the subchannel reference from its parent's child list.
//
// In order for a subchannel to be deleted from the tree, it must meet the criteria that removal of
// the corresponding grpc object has been invoked, and the subchannel does not have any children left.
//
// The returned boolean value indicates whether the subchannel has been successfully deleted from the tree.
func (sc *subChannel) deleteSelfFromTree() (deleted bool) {
if !sc.closeCalled || len(sc.sockets) != 0 {
return false
}
sc.cm.findEntry(sc.pid).deleteChild(sc.id)
return true
}
// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
// the subchannel, and its memory will be garbage collected.
//
// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is
// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
// the trace of the referenced entity must not be deleted. In order to release the resource allocated
// by grpc, the reference to the grpc object is reset to a dummy object.
//
// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
//
// It returns a bool to indicate whether the subchannel can be safely deleted from the map.
func (sc *subChannel) deleteSelfFromMap() (delete bool) {
if sc.getTraceRefCount() != 0 {
// free the grpc struct (i.e. addrConn)
sc.c = &dummyChannel{}
return false
}
return true
}
// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
// The delete process includes two steps:
// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
// its parent's child list.
// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
// by id will return entry not found error.
func (sc *subChannel) deleteSelfIfReady() {
if !sc.deleteSelfFromTree() {
return
}
if !sc.deleteSelfFromMap() {
return
}
sc.cm.deleteEntry(sc.id)
sc.cm.findEntry(sc.pid).deleteChild(sc.id)
sc.trace.clear()
}
func (sc *subChannel) getChannelTrace() *channelTrace {
return sc.trace
}
func (sc *subChannel) incrTraceRefCount() {
atomic.AddInt32(&sc.traceRefCount, 1)
}
func (sc *subChannel) decrTraceRefCount() {
atomic.AddInt32(&sc.traceRefCount, -1)
}
func (sc *subChannel) getTraceRefCount() int {
i := atomic.LoadInt32(&sc.traceRefCount)
return int(i)
}
func (sc *subChannel) getRefName() string {
return sc.refName
}
// SocketMetric defines the info channelz provides for a specific Socket, which
@ -318,6 +509,10 @@ func (ls *listenSocket) deleteSelfIfReady() {
grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
}
func (ls *listenSocket) getParentID() int64 {
return ls.pid
}
type normalSocket struct {
refName string
s Socket
@ -343,6 +538,10 @@ func (ns *normalSocket) deleteSelfIfReady() {
grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
}
func (ns *normalSocket) getParentID() int64 {
return ns.pid
}
// ServerMetric defines the info channelz provides for a specific Server, which
// includes ServerInternalMetric and channelz-specific data, such as channelz id,
// child list, etc.
@ -370,7 +569,6 @@ type ServerInternalMetric struct {
CallsFailed int64
// The last time a call was started on the server.
LastCallStartedTimestamp time.Time
//TODO: trace
}
// Server is the interface to be satisfied in order to be tracked by channelz as
@ -417,3 +615,88 @@ func (s *server) deleteSelfIfReady() {
}
s.cm.deleteEntry(s.id)
}
func (s *server) getParentID() int64 {
return 0
}
type tracedChannel interface {
getChannelTrace() *channelTrace
incrTraceRefCount()
decrTraceRefCount()
getRefName() string
}
type channelTrace struct {
cm *channelMap
createdTime time.Time
eventCount int64
mu sync.Mutex
events []*TraceEvent
}
func (c *channelTrace) append(e *TraceEvent) {
c.mu.Lock()
if len(c.events) == getMaxTraceEntry() {
del := c.events[0]
c.events = c.events[1:]
if del.RefID != 0 {
// start recursive cleanup in a goroutine to not block the call originating from grpc.
go func() {
// need to acquire c.cm.mu lock to call the unlocked attemptCleanup func.
c.cm.mu.Lock()
c.cm.decrTraceRefCount(del.RefID)
c.cm.mu.Unlock()
}()
}
}
e.Timestamp = time.Now()
c.events = append(c.events, e)
c.eventCount++
c.mu.Unlock()
}
func (c *channelTrace) clear() {
c.mu.Lock()
for _, e := range c.events {
if e.RefID != 0 {
// caller should have already held the c.cm.mu lock.
c.cm.decrTraceRefCount(e.RefID)
}
}
c.mu.Unlock()
}
// Severity is the severity level of a trace event.
// The canonical enumeration of all valid values is here:
// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
type Severity int
const (
// CtUNKNOWN indicates unknown severity of a trace event.
CtUNKNOWN Severity = iota
// CtINFO indicates info level severity of a trace event.
CtINFO
// CtWarning indicates warning level severity of a trace event.
CtWarning
// CtError indicates error level severity of a trace event.
CtError
)
// RefChannelType is the type of the entity being referenced in a trace event.
type RefChannelType int
const (
// RefChannel indicates the referenced entity is a Channel.
RefChannel RefChannelType = iota
// RefSubChannel indicates the referenced entity is a SubChannel.
RefSubChannel
)
func (c *channelTrace) dumpData() *ChannelTrace {
c.mu.Lock()
ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
ct.Events = c.events[:len(c.events)]
c.mu.Unlock()
return ct
}

View File

@ -1,4 +1,4 @@
// +build !appengine,go1.7
// +build !appengine
/*
*
@ -50,5 +50,4 @@ func (s *SocketOptionData) Getsockopt(fd uintptr) {
if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil {
s.TCPInfo = v
}
return
}

View File

@ -1,4 +1,4 @@
// +build !linux appengine !go1.7
// +build !linux appengine
/*
*
@ -20,11 +20,13 @@
package channelz
import "google.golang.org/grpc/grpclog"
import (
"sync"
func init() {
grpclog.Infof("Channelz: socket options are not supported on non-linux os and appengine.")
}
"google.golang.org/grpc/grpclog"
)
var once sync.Once
// SocketOptionData defines the struct to hold socket option data, and related
// getter function to obtain info from fd.
@ -35,4 +37,8 @@ type SocketOptionData struct {
// Getsockopt defines the function to get socket options requested by channelz.
// It is to be passed to syscall.RawConn.Control().
// Windows OS doesn't support Socket Option
func (s *SocketOptionData) Getsockopt(fd uintptr) {}
func (s *SocketOptionData) Getsockopt(fd uintptr) {
once.Do(func() {
grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.")
})
}

View File

@ -1,4 +1,4 @@
// +build linux,go1.9,!appengine
// +build linux,!appengine
/*
*

View File

@ -1,4 +1,4 @@
// +build !linux !go1.9 appengine
// +build !linux appengine
/*
*

View File

@ -25,11 +25,46 @@ import (
)
const (
prefix = "GRPC_GO_"
retryStr = prefix + "RETRY"
prefix = "GRPC_GO_"
retryStr = prefix + "RETRY"
requireHandshakeStr = prefix + "REQUIRE_HANDSHAKE"
)
// RequireHandshakeSetting describes the settings for handshaking.
type RequireHandshakeSetting int
const (
// RequireHandshakeHybrid (default, deprecated) indicates to not wait for
// handshake before considering a connection ready, but to wait before
// considering it successful.
RequireHandshakeHybrid RequireHandshakeSetting = iota
// RequireHandshakeOn (default after the 1.17 release) indicates to wait
// for handshake before considering a connection ready/successful.
RequireHandshakeOn
// RequireHandshakeOff indicates to not wait for handshake before
// considering a connection ready/successful.
RequireHandshakeOff
)
var (
// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
Retry = strings.EqualFold(os.Getenv(retryStr), "on")
// RequireHandshake is set based upon the GRPC_GO_REQUIRE_HANDSHAKE
// environment variable.
//
// Will be removed after the 1.18 release.
RequireHandshake RequireHandshakeSetting
)
func init() {
switch strings.ToLower(os.Getenv(requireHandshakeStr)) {
case "on":
default:
RequireHandshake = RequireHandshakeOn
case "off":
RequireHandshake = RequireHandshakeOff
case "hybrid":
// Will be removed after the 1.17 release.
RequireHandshake = RequireHandshakeHybrid
}
}
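// exampleHandshakePolicy is an illustrative sketch (not part of the upstream
// gRPC source): transport code can branch on the parsed setting; with
// GRPC_GO_REQUIRE_HANDSHAKE unset or set to "on", this returns true.
func exampleHandshakePolicy() bool {
	return RequireHandshake == RequireHandshakeOn
}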

View File

@ -0,0 +1,61 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package grpcsync implements additional synchronization primitives built upon
// the sync package.
package grpcsync
import (
"sync"
"sync/atomic"
)
// Event represents a one-time event that may occur in the future.
type Event struct {
fired int32
c chan struct{}
o sync.Once
}
// Fire causes e to complete. It is safe to call multiple times, and
// concurrently. It returns true iff this call to Fire caused the signaling
// channel returned by Done to close.
func (e *Event) Fire() bool {
ret := false
e.o.Do(func() {
atomic.StoreInt32(&e.fired, 1)
close(e.c)
ret = true
})
return ret
}
// Done returns a channel that will be closed when Fire is called.
func (e *Event) Done() <-chan struct{} {
return e.c
}
// HasFired returns true if Fire has been called.
func (e *Event) HasFired() bool {
return atomic.LoadInt32(&e.fired) == 1
}
// NewEvent returns a new, ready-to-use Event.
func NewEvent() *Event {
return &Event{c: make(chan struct{})}
}
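// exampleEventUsage is an illustrative sketch (not part of the upstream gRPC
// source): one goroutine fires the event once; any number of goroutines can
// block on Done or poll HasFired.
func exampleEventUsage() {
	e := NewEvent()
	go func() {
		// ... perform some one-time work, then signal completion.
		e.Fire()
	}()
	<-e.Done()       // unblocks once Fire has been called
	_ = e.HasFired() // true from this point on
}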

View File

@ -20,17 +20,31 @@
// symbols to avoid circular dependencies.
package internal
import "context"
var (
// TestingUseHandlerImpl enables the http.Handler-based server implementation.
// It must be called before Serve and requires TLS credentials.
//
// The provided grpcServer must be of type *grpc.Server. It is untyped
// for circular dependency reasons.
TestingUseHandlerImpl func(grpcServer interface{})
// WithContextDialer is exported by clientconn.go
// WithContextDialer is exported by dialoptions.go
WithContextDialer interface{} // func(context.Context, string) (net.Conn, error) grpc.DialOption
// WithResolverBuilder is exported by clientconn.go
// WithResolverBuilder is exported by dialoptions.go
WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption
// WithHealthCheckFunc is not exported by dialoptions.go
WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
// HealthCheckFunc is used to provide client-side LB channel health checking
HealthCheckFunc HealthChecker
// BalancerUnregister is exported by package balancer to unregister a balancer.
BalancerUnregister func(name string)
)
// HealthChecker defines the signature of the client-side LB channel health checking function.
type HealthChecker func(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), serviceName string) error
const (
// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
CredsBundleModeFallback = "fallback"
// CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer
// mode.
CredsBundleModeBalancer = "balancer"
// CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode
// that supports backend returned by grpclb balancer.
CredsBundleModeBackendFromBalancer = "backend-from-balancer"
)

View File

@ -0,0 +1,114 @@
// +build !appengine
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package syscall provides functionality that grpc uses to get low-level
// operating system stats/info.
package syscall
import (
"fmt"
"net"
"syscall"
"time"
"golang.org/x/sys/unix"
"google.golang.org/grpc/grpclog"
)
// GetCPUTime returns how much CPU time has passed since the start of this process.
func GetCPUTime() int64 {
var ts unix.Timespec
if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
grpclog.Fatal(err)
}
return ts.Nano()
}
// Rusage is an alias for syscall.Rusage under linux non-appengine environment.
type Rusage syscall.Rusage
// GetRusage returns the resource usage of current process.
func GetRusage() (rusage *Rusage) {
rusage = new(Rusage)
syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage))
return
}
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
// between two Rusage structs.
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
f := (*syscall.Rusage)(first)
l := (*syscall.Rusage)(latest)
var (
utimeDiffs = l.Utime.Sec - f.Utime.Sec
utimeDiffus = l.Utime.Usec - f.Utime.Usec
stimeDiffs = l.Stime.Sec - f.Stime.Sec
stimeDiffus = l.Stime.Usec - f.Stime.Usec
)
uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6
return uTimeElapsed, sTimeElapsed
}
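// exampleCPUUsage is an illustrative sketch (not part of the upstream gRPC
// source): sample Rusage around a block of work and report the user and
// system CPU seconds it consumed.
func exampleCPUUsage(work func()) (user, system float64) {
	before := GetRusage()
	work()
	after := GetRusage()
	return CPUTimeDiff(before, after)
}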
// SetTCPUserTimeout sets the TCP user timeout on a connection's socket
func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
tcpconn, ok := conn.(*net.TCPConn)
if !ok {
// not a TCP connection. exit early
return nil
}
rawConn, err := tcpconn.SyscallConn()
if err != nil {
return fmt.Errorf("error getting raw connection: %v", err)
}
err = rawConn.Control(func(fd uintptr) {
err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond))
})
if err != nil {
return fmt.Errorf("error setting option on socket: %v", err)
}
return nil
}
// GetTCPUserTimeout gets the TCP user timeout on a connection's socket
func GetTCPUserTimeout(conn net.Conn) (opt int, err error) {
tcpconn, ok := conn.(*net.TCPConn)
if !ok {
err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn)
return
}
rawConn, err := tcpconn.SyscallConn()
if err != nil {
err = fmt.Errorf("error getting raw connection: %v", err)
return
}
err = rawConn.Control(func(fd uintptr) {
opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT)
})
if err != nil {
err = fmt.Errorf("error getting option on socket: %v", err)
return
}
return
}
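// exampleTCPUserTimeout is an illustrative sketch (not part of the upstream
// gRPC source): set TCP_USER_TIMEOUT on a connection and read it back in
// milliseconds.
func exampleTCPUserTimeout(conn net.Conn) (int, error) {
	if err := SetTCPUserTimeout(conn, 20*time.Second); err != nil {
		return 0, err
	}
	return GetTCPUserTimeout(conn)
}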

View File

@ -0,0 +1,63 @@
// +build !linux appengine
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package syscall
import (
"net"
"time"
"google.golang.org/grpc/grpclog"
)
func init() {
grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.")
}
// GetCPUTime returns how much CPU time has passed since the start of this process.
// It always returns 0 under non-linux or appengine environments.
func GetCPUTime() int64 {
return 0
}
// Rusage is an empty struct under non-linux or appengine environment.
type Rusage struct{}
// GetRusage is a no-op function under non-linux or appengine environment.
func GetRusage() (rusage *Rusage) {
return nil
}
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
// between two Rusage structs. It is a no-op function under non-linux or appengine environments.
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
return 0, 0
}
// SetTCPUserTimeout is a no-op function under non-linux or appengine environments
func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
return nil
}
// GetTCPUserTimeout is a no-op function under non-linux or appengine environments.
// A negative return value indicates the operation is not supported.
func GetTCPUserTimeout(conn net.Conn) (int, error) {
return -1, nil
}

View File

@ -24,9 +24,10 @@ import (
)
const (
// bdpLimit is the maximum value the flow control windows
// will be increased to.
bdpLimit = (1 << 20) * 4
// bdpLimit is the maximum value the flow control windows will be increased
// to. TCP typically limits this to 4MB, but some systems go up to 16MB.
// Since this is only a limit, it is safe to make it optimistic.
bdpLimit = (1 << 20) * 16
// alpha is a constant factor used to keep a moving average
// of RTTs.
alpha = 0.9

View File

@ -104,7 +104,6 @@ type headerFrame struct {
type cleanupStream struct {
streamID uint32
idPtr *uint32
rst bool
rstCode http2.ErrCode
onWrite func()
@ -138,9 +137,6 @@ type outgoingSettings struct {
ss []http2.Setting
}
type settingsAck struct {
}
type incomingGoAway struct {
}

View File

@ -1,52 +0,0 @@
// +build go1.6,!go1.7
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package transport
import (
"net"
"net/http"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"golang.org/x/net/context"
)
// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
}
// ContextErr converts the error from context package into a status error.
func ContextErr(err error) error {
switch err {
case context.DeadlineExceeded:
return status.Error(codes.DeadlineExceeded, err.Error())
case context.Canceled:
return status.Error(codes.Canceled, err.Error())
}
return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err)
}
// contextFromRequest returns a background context.
func contextFromRequest(r *http.Request) context.Context {
return context.Background()
}

View File

@ -1,53 +0,0 @@
// +build go1.7
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package transport
import (
"context"
"net"
"net/http"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
netctx "golang.org/x/net/context"
)
// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{}).DialContext(ctx, network, address)
}
// ContextErr converts the error from context package into a status error.
func ContextErr(err error) error {
switch err {
case context.DeadlineExceeded, netctx.DeadlineExceeded:
return status.Error(codes.DeadlineExceeded, err.Error())
case context.Canceled, netctx.Canceled:
return status.Error(codes.Canceled, err.Error())
}
return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err)
}
// contextFromRequest returns a context from the HTTP Request.
func contextFromRequest(r *http.Request) context.Context {
return r.Context()
}

View File

@ -24,6 +24,7 @@
package transport
import (
"context"
"errors"
"fmt"
"io"
@ -34,7 +35,6 @@ import (
"time"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"golang.org/x/net/http2"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
@ -237,9 +237,9 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
if ht.stats != nil {
ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
}
ht.Close()
close(ht.writes)
}
ht.Close()
return err
}
@ -307,7 +307,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
// With this transport type there will be exactly 1 stream: this HTTP request.
ctx := contextFromRequest(ht.req)
ctx := ht.req.Context()
var cancel context.CancelFunc
if ht.timeoutSet {
ctx, cancel = context.WithTimeout(ctx, ht.timeout)
@ -326,11 +326,11 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
go func() {
select {
case <-requestOver:
return
case <-ht.closedCh:
case <-clientGone:
}
cancel()
ht.Close()
}()
req := ht.req
@ -442,5 +442,8 @@ func mapRecvMsgError(err error) error {
return status.Error(code, se.Error())
}
}
if strings.Contains(err.Error(), "body closed by handler") {
return status.Error(codes.Canceled, err.Error())
}
return connectionErrorf(true, err, err.Error())
}
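
Editor's note: the handler transport touched above is what backs (*grpc.Server).ServeHTTP, so a gRPC server can be mounted on a standard net/http server. A minimal sketch follows; the certificate paths and the commented-out service registration are placeholders, and gRPC requires HTTP/2, which net/http negotiates over TLS.

package main

import (
	"log"
	"net/http"

	"google.golang.org/grpc"
)

func main() {
	gs := grpc.NewServer()
	// pb.RegisterYourServiceServer(gs, &server{}) // service registration omitted

	httpSrv := &http.Server{
		Addr:    ":8443",
		Handler: gs, // *grpc.Server satisfies http.Handler via this transport
	}
	// Certificate and key paths are placeholders.
	log.Fatal(httpSrv.ListenAndServeTLS("server.crt", "server.key"))
}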

View File

@ -19,6 +19,8 @@
package transport
import (
"context"
"fmt"
"io"
"math"
"net"
@ -28,13 +30,13 @@ import (
"sync/atomic"
"time"
"golang.org/x/net/context"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
@ -73,7 +75,7 @@ type http2Client struct {
isSecure bool
creds []credentials.PerRPCCredentials
perRPCCreds []credentials.PerRPCCredentials
// Boolean to keep track of reading activity on transport.
// 1 is true and 0 is false.
@ -89,10 +91,10 @@ type http2Client struct {
maxSendHeaderListSize *uint32
bdpEst *bdpEstimator
// onSuccess is a callback that client transport calls upon
// onPrefaceReceipt is a callback that client transport calls upon
// receiving server preface to signal that a successful HTTP2
// connection was established.
onSuccess func()
onPrefaceReceipt func()
maxConcurrentStreams uint32
streamQuota int64
@ -111,26 +113,17 @@ type http2Client struct {
// Fields below are for channelz metric collection.
channelzID int64 // channelz unique identification number
czmu sync.RWMutex
kpCount int64
// The number of streams that have started, including already finished ones.
streamsStarted int64
// The number of streams that have ended successfully by receiving EoS bit set
// frame from server.
streamsSucceeded int64
streamsFailed int64
lastStreamCreated time.Time
msgSent int64
msgRecv int64
lastMsgSent time.Time
lastMsgRecv time.Time
czData *channelzData
onGoAway func(GoAwayReason)
onClose func()
}
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
if fn != nil {
return fn(ctx, addr)
}
return dialContext(ctx, "tcp", addr)
return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
}
func isTemporary(err error) bool {
@ -152,7 +145,7 @@ func isTemporary(err error) bool {
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. Non-nil error returns if construction
// fails.
func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onSuccess func()) (_ *http2Client, err error) {
func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
scheme := "http"
ctx, cancel := context.WithCancel(ctx)
defer func() {
@ -174,18 +167,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
conn.Close()
}
}(conn)
var (
isSecure bool
authInfo credentials.AuthInfo
)
if creds := opts.TransportCredentials; creds != nil {
scheme = "https"
conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Authority, conn)
if err != nil {
return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
}
isSecure = true
}
kp := opts.KeepaliveParams
// Validate keepalive parameters.
if kp.Time == 0 {
@ -194,6 +175,36 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
if kp.Timeout == 0 {
kp.Timeout = defaultClientKeepaliveTimeout
}
keepaliveEnabled := false
if kp.Time != infinity {
if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
}
keepaliveEnabled = true
}
var (
isSecure bool
authInfo credentials.AuthInfo
)
transportCreds := opts.TransportCredentials
perRPCCreds := opts.PerRPCCredentials
if b := opts.CredsBundle; b != nil {
if t := b.TransportCredentials(); t != nil {
transportCreds = t
}
if t := b.PerRPCCredentials(); t != nil {
perRPCCreds = append(perRPCCreds, t)
}
}
if transportCreds != nil {
scheme = "https"
conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn)
if err != nil {
return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
}
isSecure = true
}
dynamicWindow := true
icwz := int32(initialWindowSize)
if opts.InitialConnWindowSize >= defaultWindowSize {
@ -225,15 +236,19 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
scheme: scheme,
activeStreams: make(map[uint32]*Stream),
isSecure: isSecure,
creds: opts.PerRPCCredentials,
perRPCCreds: perRPCCreds,
kp: kp,
statsHandler: opts.StatsHandler,
initialWindowSize: initialWindowSize,
onSuccess: onSuccess,
onPrefaceReceipt: onPrefaceReceipt,
nextID: 1,
maxConcurrentStreams: defaultMaxStreamsClient,
streamQuota: defaultMaxStreamsClient,
streamsQuotaAvailable: make(chan struct{}, 1),
czData: new(channelzData),
onGoAway: onGoAway,
onClose: onClose,
keepaliveEnabled: keepaliveEnabled,
}
t.controlBuf = newControlBuffer(t.ctxDone)
if opts.InitialWindowSize >= defaultWindowSize {
@ -260,16 +275,16 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
t.statsHandler.HandleConn(t.ctx, connBegin)
}
if channelz.IsOn() {
t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, "")
t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
}
if t.kp.Time != infinity {
t.keepaliveEnabled = true
if t.keepaliveEnabled {
go t.keepalive()
}
// Start the reader goroutine for incoming message. Each transport has
// a dedicated goroutine which reads HTTP2 frame from network. Then it
// dispatches the frame to the corresponding stream entity.
go t.reader()
// Send connection preface to server.
n, err := t.conn.Write(clientPreface)
if err != nil {
@ -306,6 +321,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err)
}
}
t.framer.writer.Flush()
go func() {
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
@ -346,6 +362,9 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
ctx: s.ctx,
ctxDone: s.ctx.Done(),
recv: s.buf,
closeStream: func(err error) {
t.CloseStream(s, err)
},
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
@ -454,7 +473,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
func (t *http2Client) createAudience(callHdr *CallHdr) string {
// Create an audience string only if needed.
if len(t.creds) == 0 && callHdr.Creds == nil {
if len(t.perRPCCreds) == 0 && callHdr.Creds == nil {
return ""
}
// Construct URI required to get auth request metadata.
@ -469,7 +488,7 @@ func (t *http2Client) createAudience(callHdr *CallHdr) string {
func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) {
authData := map[string]string{}
for _, c := range t.creds {
for _, c := range t.perRPCCreds {
data, err := c.GetRequestMetadata(ctx, audience)
if err != nil {
if _, ok := status.FromError(err); ok {
@ -550,10 +569,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
}
t.activeStreams[id] = s
if channelz.IsOn() {
t.czmu.Lock()
t.streamsStarted++
t.lastStreamCreated = time.Now()
t.czmu.Unlock()
atomic.AddInt64(&t.czData.streamsStarted, 1)
atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
}
var sendPing bool
// If the number of active streams change from 0 to 1, then check if keepalive
@ -677,7 +694,9 @@ func (t *http2Client) CloseStream(s *Stream, err error) {
func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
// Set stream status to done.
if s.swapState(streamDone) == streamDone {
// If it was already done, return.
// If it was already done, return. If multiple closeStream calls
// happen simultaneously, wait for the first to finish.
<-s.done
return
}
// status and trailers can be updated here without any synchronization because the stream goroutine will
@ -691,8 +710,6 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
// This will unblock reads eventually.
s.write(recvMsg{err: err})
}
// This will unblock write.
close(s.done)
// If headerChan isn't closed, then close it.
if atomic.SwapUint32(&s.headerDone, 1) == 0 {
s.noHeaders = true
@ -707,13 +724,11 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
}
t.mu.Unlock()
if channelz.IsOn() {
t.czmu.Lock()
if eosReceived {
t.streamsSucceeded++
atomic.AddInt64(&t.czData.streamsSucceeded, 1)
} else {
t.streamsFailed++
atomic.AddInt64(&t.czData.streamsFailed, 1)
}
t.czmu.Unlock()
}
},
rst: rst,
@ -730,11 +745,17 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
return true
}
t.controlBuf.executeAndPut(addBackStreamQuota, cleanup)
// This will unblock write.
close(s.done)
}
// Close kicks off the shutdown process of the transport. This should be called
// only once on a transport. Once it is called, the transport should not be
// accessed any more.
//
// This method blocks until the addrConn that initiated this transport is
// re-connected. This happens because t.onClose() begins reconnect logic at the
// addrConn level and blocks until the addrConn is successfully connected.
func (t *http2Client) Close() error {
t.mu.Lock()
// Make sure we only Close once.
@ -762,6 +783,7 @@ func (t *http2Client) Close() error {
}
t.statsHandler.HandleConn(t.ctx, connEnd)
}
t.onClose()
return err
}
@ -1058,6 +1080,9 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
close(t.goAway)
t.state = draining
t.controlBuf.put(&incomingGoAway{})
// This has to be a new goroutine because we're still using the current goroutine to read in the transport.
t.onGoAway(t.goAwayReason)
}
// All streams with IDs greater than the GoAwayId
// and smaller than the previous GoAway ID should be killed.
@ -1160,7 +1185,9 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
if !endStream {
return
}
t.closeStream(s, io.EOF, false, http2.ErrCodeNo, state.status(), state.mdata, true)
// if client received END_STREAM from server while stream was still active, send RST_STREAM
rst := s.getState() == streamActive
t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.mdata, true)
}
// reader runs as a separate goroutine in charge of reading data from network
@ -1174,18 +1201,19 @@ func (t *http2Client) reader() {
// Check the validity of server preface.
frame, err := t.framer.fr.ReadFrame()
if err != nil {
t.Close()
t.Close() // this kicks off resetTransport, so must be last before return
return
}
t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
if t.keepaliveEnabled {
atomic.CompareAndSwapUint32(&t.activity, 0, 1)
}
sf, ok := frame.(*http2.SettingsFrame)
if !ok {
t.Close()
t.Close() // this kicks off resetTransport, so must be last before return
return
}
t.onSuccess()
t.onPrefaceReceipt()
t.handleSettings(sf, true)
// loop to keep reading incoming messages on this transport.
@ -1263,9 +1291,7 @@ func (t *http2Client) keepalive() {
} else {
t.mu.Unlock()
if channelz.IsOn() {
t.czmu.Lock()
t.kpCount++
t.czmu.Unlock()
atomic.AddInt64(&t.czData.kpCount, 1)
}
// Send ping.
t.controlBuf.put(p)
@ -1305,17 +1331,16 @@ func (t *http2Client) GoAway() <-chan struct{} {
}
func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric {
t.czmu.RLock()
s := channelz.SocketInternalMetric{
StreamsStarted: t.streamsStarted,
StreamsSucceeded: t.streamsSucceeded,
StreamsFailed: t.streamsFailed,
MessagesSent: t.msgSent,
MessagesReceived: t.msgRecv,
KeepAlivesSent: t.kpCount,
LastLocalStreamCreatedTimestamp: t.lastStreamCreated,
LastMessageSentTimestamp: t.lastMsgSent,
LastMessageReceivedTimestamp: t.lastMsgRecv,
StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted),
StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded),
StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed),
MessagesSent: atomic.LoadInt64(&t.czData.msgSent),
MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv),
KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount),
LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
LocalFlowControlWindow: int64(t.fc.getSize()),
SocketOptions: channelz.GetSocketOption(t.conn),
LocalAddr: t.localAddr,
@ -1325,23 +1350,18 @@ func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric {
if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
s.Security = au.GetSecurityValue()
}
t.czmu.RUnlock()
s.RemoteFlowControlWindow = t.getOutFlowWindow()
return &s
}
func (t *http2Client) IncrMsgSent() {
t.czmu.Lock()
t.msgSent++
t.lastMsgSent = time.Now()
t.czmu.Unlock()
atomic.AddInt64(&t.czData.msgSent, 1)
atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
}
func (t *http2Client) IncrMsgRecv() {
t.czmu.Lock()
t.msgRecv++
t.lastMsgRecv = time.Now()
t.czmu.Unlock()
atomic.AddInt64(&t.czData.msgRecv, 1)
atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
}
func (t *http2Client) getOutFlowWindow() int64 {
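
Editor's note: the closeStream change in this file makes concurrent closers wait for the first one to finish (state swap plus done channel, with close(s.done) moved to the end). Below is a self-contained sketch of that pattern under assumed names; it is not the transport code itself.

// Package stream illustrates the idempotent-close pattern.
package stream

import "sync/atomic"

const (
	streamActive uint32 = iota
	streamDone
)

type stream struct {
	state  uint32
	doneCh chan struct{}
}

func newStream() *stream {
	return &stream{doneCh: make(chan struct{})}
}

// close is safe to call from multiple goroutines: the first caller performs
// the cleanup and closes doneCh; later callers block until cleanup is done.
func (s *stream) close(cleanup func()) {
	if atomic.SwapUint32(&s.state, streamDone) == streamDone {
		<-s.doneCh
		return
	}
	cleanup()
	close(s.doneCh)
}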

View File

@ -20,6 +20,7 @@ package transport
import (
"bytes"
"context"
"errors"
"fmt"
"io"
@ -31,7 +32,6 @@ import (
"time"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
@ -118,19 +118,7 @@ type http2Server struct {
// Fields below are for channelz metric collection.
channelzID int64 // channelz unique identification number
czmu sync.RWMutex
kpCount int64
// The number of streams that have started, including already finished ones.
streamsStarted int64
// The number of streams that have ended successfully by sending frame with
// EoS bit set.
streamsSucceeded int64
streamsFailed int64
lastStreamCreated time.Time
msgSent int64
msgRecv int64
lastMsgSent time.Time
lastMsgRecv time.Time
czData *channelzData
}
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
@ -231,6 +219,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
idle: time.Now(),
kep: kep,
initialWindowSize: iwz,
czData: new(channelzData),
}
t.controlBuf = newControlBuffer(t.ctxDone)
if dynamicWindow {
@ -248,7 +237,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
t.stats.HandleConn(t.ctx, connBegin)
}
if channelz.IsOn() {
t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, "")
t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
}
t.framer.writer.Flush()
@ -295,7 +284,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
}
// operateHeader takes action on the decoded headers.
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) {
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
streamID := frame.Header().StreamID
state := decodeState{serverSide: true}
if err := state.decodeHeader(frame); err != nil {
@ -307,7 +296,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
onWrite: func() {},
})
}
return
return false
}
buf := newRecvBuffer()
@ -361,13 +350,13 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
rstCode: http2.ErrCodeRefusedStream,
onWrite: func() {},
})
return
return false
}
}
t.mu.Lock()
if t.state != reachable {
t.mu.Unlock()
return
return false
}
if uint32(len(t.activeStreams)) >= t.maxStreams {
t.mu.Unlock()
@ -377,7 +366,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
rstCode: http2.ErrCodeRefusedStream,
onWrite: func() {},
})
return
return false
}
if streamID%2 != 1 || streamID <= t.maxStreamID {
t.mu.Unlock()
@ -392,10 +381,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
}
t.mu.Unlock()
if channelz.IsOn() {
t.czmu.Lock()
t.streamsStarted++
t.lastStreamCreated = time.Now()
t.czmu.Unlock()
atomic.AddInt64(&t.czData.streamsStarted, 1)
atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
}
s.requestRead = func(n int) {
t.adjustWindow(s, uint32(n))
@ -430,7 +417,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
wq: s.wq,
})
handle(s)
return
return false
}
// HandleStreams receives incoming streams using the given handler. This is
@ -977,9 +964,7 @@ func (t *http2Server) keepalive() {
}
pingSent = true
if channelz.IsOn() {
t.czmu.Lock()
t.kpCount++
t.czmu.Unlock()
atomic.AddInt64(&t.czData.kpCount, 1)
}
t.controlBuf.put(p)
keepalive.Reset(t.kp.Timeout)
@ -1044,13 +1029,11 @@ func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, hd
}
t.mu.Unlock()
if channelz.IsOn() {
t.czmu.Lock()
if eosReceived {
t.streamsSucceeded++
atomic.AddInt64(&t.czData.streamsSucceeded, 1)
} else {
t.streamsFailed++
atomic.AddInt64(&t.czData.streamsFailed, 1)
}
t.czmu.Unlock()
}
},
}
@ -1138,17 +1121,16 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
}
func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric {
t.czmu.RLock()
s := channelz.SocketInternalMetric{
StreamsStarted: t.streamsStarted,
StreamsSucceeded: t.streamsSucceeded,
StreamsFailed: t.streamsFailed,
MessagesSent: t.msgSent,
MessagesReceived: t.msgRecv,
KeepAlivesSent: t.kpCount,
LastRemoteStreamCreatedTimestamp: t.lastStreamCreated,
LastMessageSentTimestamp: t.lastMsgSent,
LastMessageReceivedTimestamp: t.lastMsgRecv,
StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted),
StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded),
StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed),
MessagesSent: atomic.LoadInt64(&t.czData.msgSent),
MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv),
KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount),
LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
LocalFlowControlWindow: int64(t.fc.getSize()),
SocketOptions: channelz.GetSocketOption(t.conn),
LocalAddr: t.localAddr,
@ -1158,23 +1140,18 @@ func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric {
if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
s.Security = au.GetSecurityValue()
}
t.czmu.RUnlock()
s.RemoteFlowControlWindow = t.getOutFlowWindow()
return &s
}
func (t *http2Server) IncrMsgSent() {
t.czmu.Lock()
t.msgSent++
t.lastMsgSent = time.Now()
t.czmu.Unlock()
atomic.AddInt64(&t.czData.msgSent, 1)
atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
}
func (t *http2Server) IncrMsgRecv() {
t.czmu.Lock()
t.msgRecv++
t.lastMsgRecv = time.Now()
t.czmu.Unlock()
atomic.AddInt64(&t.czData.msgRecv, 1)
atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
}
func (t *http2Server) getOutFlowWindow() int64 {

View File

@ -24,6 +24,7 @@ import (
"encoding/base64"
"fmt"
"io"
"math"
"net"
"net/http"
"strconv"
@ -435,6 +436,10 @@ func decodeTimeout(s string) (time.Duration, error) {
if size < 2 {
return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
}
if size > 9 {
// Spec allows for 8 digits plus the unit.
return 0, fmt.Errorf("transport: timeout string is too long: %q", s)
}
unit := timeoutUnit(s[size-1])
d, ok := timeoutUnitToDuration(unit)
if !ok {
@ -444,6 +449,11 @@ func decodeTimeout(s string) (time.Duration, error) {
if err != nil {
return 0, err
}
const maxHours = math.MaxInt64 / int64(time.Hour)
if d == time.Hour && t > maxHours {
// This timeout would overflow math.MaxInt64; clamp it.
return time.Duration(math.MaxInt64), nil
}
return d * time.Duration(t), nil
}
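
Editor's note: a quick standalone check of the new overflow guard in decodeTimeout, showing why hour-unit timeouts need clamping while second-unit ones do not.

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// The largest hour count that still fits in an int64 of nanoseconds.
	const maxHours = math.MaxInt64 / int64(time.Hour)
	fmt.Println(maxHours) // 2562047

	// "99999999H" is a legal 8-digit grpc-timeout value, but 99999999 hours
	// of nanoseconds would overflow int64, so the parser clamps it.
	fmt.Println(int64(99999999) > maxHours) // true
}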

View File

@ -42,9 +42,3 @@ func errorf(format string, args ...interface{}) {
grpclog.Errorf(format, args...)
}
}
func fatalf(format string, args ...interface{}) {
if grpclog.V(logLevel) {
grpclog.Fatalf(format, args...)
}
}

View File

@ -22,6 +22,7 @@
package transport
import (
"context"
"errors"
"fmt"
"io"
@ -29,7 +30,6 @@ import (
"sync"
"sync/atomic"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
@ -110,15 +110,15 @@ func (b *recvBuffer) get() <-chan recvMsg {
return b.c
}
//
// recvBufferReader implements io.Reader interface to read the data from
// recvBuffer.
type recvBufferReader struct {
ctx context.Context
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
recv *recvBuffer
last []byte // Stores the remaining data in the previous calls.
err error
closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata.
ctx context.Context
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
recv *recvBuffer
last []byte // Stores the remaining data in the previous calls.
err error
}
// Read reads the next len(p) bytes from last. If last is drained, it tries to
@ -128,31 +128,53 @@ func (r *recvBufferReader) Read(p []byte) (n int, err error) {
if r.err != nil {
return 0, r.err
}
n, r.err = r.read(p)
return n, r.err
}
func (r *recvBufferReader) read(p []byte) (n int, err error) {
if r.last != nil && len(r.last) > 0 {
// Read remaining data left in last call.
copied := copy(p, r.last)
r.last = r.last[copied:]
return copied, nil
}
if r.closeStream != nil {
n, r.err = r.readClient(p)
} else {
n, r.err = r.read(p)
}
return n, r.err
}
func (r *recvBufferReader) read(p []byte) (n int, err error) {
select {
case <-r.ctxDone:
return 0, ContextErr(r.ctx.Err())
case m := <-r.recv.get():
r.recv.load()
if m.err != nil {
return 0, m.err
}
copied := copy(p, m.data)
r.last = m.data[copied:]
return copied, nil
return r.readAdditional(m, p)
}
}
func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
// If the context is canceled, then close the stream with nil metadata.
// closeStream writes its error parameter to r.recv as a recvMsg.
// r.readAdditional acts on that message and returns the necessary error.
select {
case <-r.ctxDone:
r.closeStream(ContextErr(r.ctx.Err()))
m := <-r.recv.get()
return r.readAdditional(m, p)
case m := <-r.recv.get():
return r.readAdditional(m, p)
}
}
func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
r.recv.load()
if m.err != nil {
return 0, m.err
}
copied := copy(p, m.data)
r.last = m.data[copied:]
return copied, nil
}
type streamState uint32
const (
@ -176,7 +198,6 @@ type Stream struct {
buf *recvBuffer
trReader io.Reader
fc *inFlow
recvQuota uint32
wq *writeQuota
// Callback to state application's intentions to read data. This
@ -187,8 +208,12 @@ type Stream struct {
headerDone uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
// hdrMu protects header and trailer metadata on the server-side.
hdrMu sync.Mutex
header metadata.MD // the received header metadata.
hdrMu sync.Mutex
// On client side, header keeps the received header metadata.
//
// On server side, header keeps the header set by SetHeader(). The complete
// header will be merged into this after t.WriteHeader() is called.
header metadata.MD
trailer metadata.MD // the key-value map of trailer metadata.
noHeaders bool // set if the client never received headers (set only after the stream is done).
@ -267,10 +292,19 @@ func (s *Stream) Done() <-chan struct{} {
return s.done
}
// Header acquires the key-value pairs of header metadata once it
// is available. It blocks until i) the metadata is ready or ii) there is no
// header metadata or iii) the stream is canceled/expired.
// Header returns the header metadata of the stream.
//
// On client side, it acquires the key-value pairs of header metadata once it is
// available. It blocks until i) the metadata is ready or ii) there is no header
// metadata or iii) the stream is canceled/expired.
//
// On server side, it returns the out header after t.WriteHeader is called.
func (s *Stream) Header() (metadata.MD, error) {
if s.headerChan == nil && s.header != nil {
// On server side, return the header in stream. It will be the out
// header after t.WriteHeader is called.
return s.header.Copy(), nil
}
err := s.waitOnHeader()
// Even if the stream is closed, header is returned if available.
select {
@ -466,8 +500,12 @@ type ConnectOptions struct {
FailOnNonTempDialError bool
// PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
PerRPCCredentials []credentials.PerRPCCredentials
// TransportCredentials stores the Authenticator required to setup a client connection.
// TransportCredentials stores the Authenticator required to setup a client
// connection. Only one of TransportCredentials and CredsBundle is non-nil.
TransportCredentials credentials.TransportCredentials
// CredsBundle is the credentials bundle to be used. Only one of
// TransportCredentials and CredsBundle is non-nil.
CredsBundle credentials.Bundle
// KeepaliveParams stores the keepalive parameters.
KeepaliveParams keepalive.ClientParameters
// StatsHandler stores the handler for stats.
@ -495,8 +533,8 @@ type TargetInfo struct {
// NewClientTransport establishes the transport with the required ConnectOptions
// and returns it to the caller.
func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error) {
return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess)
func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose)
}
// Options provides additional hints and information for message
@ -683,3 +721,38 @@ const (
// "too_many_pings".
GoAwayTooManyPings GoAwayReason = 2
)
// channelzData is used to store channelz related data for http2Client and http2Server.
// These fields cannot be embedded in the original structs (e.g. http2Client), since performing atomic
// operations on an int64 variable on a 32-bit machine requires the user to enforce memory alignment.
// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
type channelzData struct {
kpCount int64
// The number of streams that have started, including already finished ones.
streamsStarted int64
// Client side: The number of streams that have ended successfully by receiving
// EoS bit set frame from server.
// Server side: The number of streams that have ended successfully by sending
// frame with EoS bit set.
streamsSucceeded int64
streamsFailed int64
// lastStreamCreatedTime stores the timestamp at which the last stream was created. It is of int64 type
// instead of time.Time since it is more costly to atomically update a time.Time variable than an int64
// variable. The same goes for lastMsgSentTime and lastMsgRecvTime.
lastStreamCreatedTime int64
msgSent int64
msgRecv int64
lastMsgSentTime int64
lastMsgRecvTime int64
}
// ContextErr converts the error from context package into a status error.
func ContextErr(err error) error {
switch err {
case context.DeadlineExceeded:
return status.Error(codes.DeadlineExceeded, err.Error())
case context.Canceled:
return status.Error(codes.Canceled, err.Error())
}
return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err)
}
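
Editor's note: the channelzData struct introduced above replaces the czmu mutex with lock-free counters. Here is a self-contained sketch of that pattern; field and function names are placeholders.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// counters mirrors the channelzData idea: keep the int64 fields in their own
// struct so they stay 8-byte aligned (needed for atomics on 32-bit platforms)
// and store timestamps as UnixNano so they can be updated atomically instead
// of under a mutex.
type counters struct {
	streamsStarted        int64
	lastStreamCreatedTime int64 // UnixNano
}

func (c *counters) recordStreamStart() {
	atomic.AddInt64(&c.streamsStarted, 1)
	atomic.StoreInt64(&c.lastStreamCreatedTime, time.Now().UnixNano())
}

func (c *counters) snapshot() (started int64, last time.Time) {
	return atomic.LoadInt64(&c.streamsStarted),
		time.Unix(0, atomic.LoadInt64(&c.lastStreamCreatedTime))
}

func main() {
	var c counters
	c.recordStreamStart()
	started, last := c.snapshot()
	fmt.Println(started, last)
}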

View File

@ -16,7 +16,8 @@
*
*/
// Package keepalive defines configurable parameters for point-to-point healthcheck.
// Package keepalive defines configurable parameters for point-to-point
// healthcheck.
package keepalive
import (
@ -24,42 +25,59 @@ import (
)
// ClientParameters is used to set keepalive parameters on the client-side.
// These configure how the client will actively probe to notice when a connection is broken
// and send pings so intermediaries will be aware of the liveness of the connection.
// Make sure these parameters are set in coordination with the keepalive policy on the server,
// as incompatible settings can result in closing of connection.
// These configure how the client will actively probe to notice when a
// connection is broken and send pings so intermediaries will be aware of the
// liveness of the connection. Make sure these parameters are set in
// coordination with the keepalive policy on the server, as incompatible
// settings can result in closing of connection.
type ClientParameters struct {
// After a duration of this time if the client doesn't see any activity it pings the server to see if the transport is still alive.
// After a duration of this time if the client doesn't see any activity it
// pings the server to see if the transport is still alive.
Time time.Duration // The current default value is infinity.
// After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that
// the connection is closed.
// After having pinged for keepalive check, the client waits for a duration
// of Timeout and if no activity is seen even after that the connection is
// closed.
Timeout time.Duration // The current default value is 20 seconds.
// If true, client runs keepalive checks even with no active RPCs.
// If true, client sends keepalive pings even with no active RPCs. If false,
// when there are no active RPCs, Time and Timeout will be ignored and no
// keepalive pings will be sent.
PermitWithoutStream bool // false by default.
}
// ServerParameters is used to set keepalive and max-age parameters on the server-side.
// ServerParameters is used to set keepalive and max-age parameters on the
// server-side.
type ServerParameters struct {
// MaxConnectionIdle is a duration for the amount of time after which an idle connection would be closed by sending a GoAway.
// Idleness duration is defined since the most recent time the number of outstanding RPCs became zero or the connection establishment.
// MaxConnectionIdle is a duration for the amount of time after which an
// idle connection would be closed by sending a GoAway. Idleness duration is
// defined since the most recent time the number of outstanding RPCs became
// zero or the connection establishment.
MaxConnectionIdle time.Duration // The current default value is infinity.
// MaxConnectionAge is a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway.
// A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms.
// MaxConnectionAge is a duration for the maximum amount of time a
// connection may exist before it will be closed by sending a GoAway. A
// random jitter of +/-10% will be added to MaxConnectionAge to spread out
// connection storms.
MaxConnectionAge time.Duration // The current default value is infinity.
// MaxConnectinoAgeGrace is an additive period after MaxConnectionAge after which the connection will be forcibly closed.
// MaxConnectionAgeGrace is an additive period after MaxConnectionAge after
// which the connection will be forcibly closed.
MaxConnectionAgeGrace time.Duration // The current default value is infinity.
// After a duration of this time if the server doesn't see any activity it pings the client to see if the transport is still alive.
// After a duration of this time if the server doesn't see any activity it
// pings the client to see if the transport is still alive.
Time time.Duration // The current default value is 2 hours.
// After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that
// the connection is closed.
// After having pinged for keepalive check, the server waits for a duration
// of Timeout and if no activity is seen even after that the connection is
// closed.
Timeout time.Duration // The current default value is 20 seconds.
}
// EnforcementPolicy is used to set keepalive enforcement policy on the server-side.
// Server will close connection with a client that violates this policy.
// EnforcementPolicy is used to set keepalive enforcement policy on the
// server-side. Server will close connection with a client that violates this
// policy.
type EnforcementPolicy struct {
// MinTime is the minimum amount of time a client should wait before sending a keepalive ping.
// MinTime is the minimum amount of time a client should wait before sending
// a keepalive ping.
MinTime time.Duration // The current default value is 5 minutes.
// If true, server expects keepalive pings even when there are no active streams(RPCs).
// If true, server allows keepalive pings even when there are no active
// streams(RPCs). If false, and client sends ping when there are no active
// streams, server will send GOAWAY and close the connection.
PermitWithoutStream bool // false by default.
}
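
Editor's note: for orientation, this is how the client-side parameters documented above are typically supplied, assuming the public grpc.Dial API (server-side uses grpc.KeepaliveParams and grpc.KeepaliveEnforcementPolicy analogously). The address is a placeholder and WithInsecure is used only to keep the sketch short.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Ping after 30s of inactivity, give the server 10s to answer, and keep
	// pinging even when no RPCs are in flight. The server must allow this via
	// its EnforcementPolicy or it will send GOAWAY ("too_many_pings").
	conn, err := grpc.Dial("backend.example.com:443", // placeholder address
		grpc.WithInsecure(),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                30 * time.Second,
			Timeout:             10 * time.Second,
			PermitWithoutStream: true,
		}))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}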

View File

@ -22,10 +22,9 @@
package metadata // import "google.golang.org/grpc/metadata"
import (
"context"
"fmt"
"strings"
"golang.org/x/net/context"
)
// DecodeKeyValue returns k, v, nil.

View File

@ -19,13 +19,13 @@
package naming
import (
"context"
"errors"
"fmt"
"net"
"strconv"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc/grpclog"
)
@ -37,6 +37,9 @@ const (
var (
errMissingAddr = errors.New("missing address")
errWatcherClose = errors.New("watcher has been closed")
lookupHost = net.DefaultResolver.LookupHost
lookupSRV = net.DefaultResolver.LookupSRV
)
// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and

View File

@ -1,34 +0,0 @@
// +build go1.6,!go1.8
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package naming
import (
"net"
"golang.org/x/net/context"
)
var (
lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) }
lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
return net.LookupSRV(service, proto, name)
}
)

View File

@ -21,9 +21,9 @@
package peer
import (
"context"
"net"
"golang.org/x/net/context"
"google.golang.org/grpc/credentials"
)

View File

@ -19,10 +19,10 @@
package grpc
import (
"context"
"io"
"sync"
"golang.org/x/net/context"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"

View File

@ -19,7 +19,8 @@
package grpc
import (
"golang.org/x/net/context"
"context"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/grpclog"
@ -56,6 +57,7 @@ func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err er
if b.sc == nil {
b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
if err != nil {
//TODO(yuxuanli): why not change the cc state to Idle?
grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
return
}

View File

@ -20,6 +20,8 @@ package grpc
import (
"bufio"
"context"
"encoding/base64"
"errors"
"fmt"
"io"
@ -27,10 +29,10 @@ import (
"net/http"
"net/http/httputil"
"net/url"
"golang.org/x/net/context"
)
const proxyAuthHeaderKey = "Proxy-Authorization"
var (
// errDisabled indicates that proxy is disabled for the address.
errDisabled = errors.New("proxy is disabled for the address")
@ -38,7 +40,7 @@ var (
httpProxyFromEnvironment = http.ProxyFromEnvironment
)
func mapAddress(ctx context.Context, address string) (string, error) {
func mapAddress(ctx context.Context, address string) (*url.URL, error) {
req := &http.Request{
URL: &url.URL{
Scheme: "https",
@ -47,12 +49,12 @@ func mapAddress(ctx context.Context, address string) (string, error) {
}
url, err := httpProxyFromEnvironment(req)
if err != nil {
return "", err
return nil, err
}
if url == nil {
return "", errDisabled
return nil, errDisabled
}
return url.Host, nil
return url, nil
}
// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
@ -69,18 +71,28 @@ func (c *bufConn) Read(b []byte) (int, error) {
return c.r.Read(b)
}
func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ net.Conn, err error) {
func basicAuth(username, password string) string {
auth := username + ":" + password
return base64.StdEncoding.EncodeToString([]byte(auth))
}
func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) {
defer func() {
if err != nil {
conn.Close()
}
}()
req := (&http.Request{
req := &http.Request{
Method: http.MethodConnect,
URL: &url.URL{Host: addr},
URL: &url.URL{Host: backendAddr},
Header: map[string][]string{"User-Agent": {grpcUA}},
})
}
if t := proxyURL.User; t != nil {
u := t.Username()
p, _ := t.Password()
req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p))
}
if err := sendHTTPRequest(ctx, req, conn); err != nil {
return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
@ -108,23 +120,33 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_
// provided dialer, does HTTP CONNECT handshake and returns the connection.
func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) {
return func(ctx context.Context, addr string) (conn net.Conn, err error) {
var skipHandshake bool
newAddr, err := mapAddress(ctx, addr)
var newAddr string
proxyURL, err := mapAddress(ctx, addr)
if err != nil {
if err != errDisabled {
return nil, err
}
skipHandshake = true
newAddr = addr
} else {
newAddr = proxyURL.Host
}
conn, err = dialer(ctx, newAddr)
if err != nil {
return
}
if !skipHandshake {
conn, err = doHTTPConnectHandshake(ctx, conn, addr)
if proxyURL != nil {
// proxy is disabled if proxyURL is nil.
conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL)
}
return
}
}
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
req = req.WithContext(ctx)
if err := req.Write(conn); err != nil {
return fmt.Errorf("failed to write the HTTP request: %v", err)
}
return nil
}
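
Editor's note: the handshake above sends "Proxy-Authorization: Basic <base64(user:pass)>" when the proxy URL (for example from HTTPS_PROXY=http://user:pass@proxy:3128) carries credentials. A standalone illustration of that header value follows; the credentials and proxy host are placeholders.

package main

import (
	"encoding/base64"
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("http://alice:s3cret@proxy.internal:3128")
	if err != nil {
		panic(err)
	}
	user := u.User.Username()
	pass, _ := u.User.Password()
	header := "Basic " + base64.StdEncoding.EncodeToString([]byte(user+":"+pass))
	fmt.Println("Proxy-Authorization:", header)
}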

View File

@ -1,6 +1,6 @@
/*
*
* Copyright 2017 gRPC authors.
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -21,6 +21,7 @@
package dns
import (
"context"
"encoding/json"
"errors"
"fmt"
@ -31,7 +32,6 @@ import (
"sync"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/grpcrand"
@ -43,9 +43,10 @@ func init() {
}
const (
defaultPort = "443"
defaultFreq = time.Minute * 30
golang = "GO"
defaultPort = "443"
defaultFreq = time.Minute * 30
defaultDNSSvrPort = "53"
golang = "GO"
// In DNS, service config is encoded in a TXT record via the mechanism
// described in RFC-1464 using the attribute name grpc_config.
txtAttribute = "grpc_config="
@ -61,6 +62,31 @@ var (
errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
)
var (
defaultResolver netResolver = net.DefaultResolver
)
var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
return func(ctx context.Context, network, address string) (net.Conn, error) {
var dialer net.Dialer
return dialer.DialContext(ctx, network, authority)
}
}
var customAuthorityResolver = func(authority string) (netResolver, error) {
host, port, err := parseTarget(authority, defaultDNSSvrPort)
if err != nil {
return nil, err
}
authorityWithPort := net.JoinHostPort(host, port)
return &net.Resolver{
PreferGo: true,
Dial: customAuthorityDialler(authorityWithPort),
}, nil
}
// NewBuilder creates a dnsBuilder which is used to create DNS resolvers.
func NewBuilder() resolver.Builder {
return &dnsBuilder{minFreq: defaultFreq}
@ -73,10 +99,7 @@ type dnsBuilder struct {
// Build creates and starts a DNS resolver that watches the name resolution of the target.
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
if target.Authority != "" {
return nil, fmt.Errorf("Default DNS resolver does not support custom DNS server")
}
host, port, err := parseTarget(target.Endpoint)
host, port, err := parseTarget(target.Endpoint, defaultPort)
if err != nil {
return nil, err
}
@ -111,6 +134,15 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
disableServiceConfig: opts.DisableServiceConfig,
}
if target.Authority == "" {
d.resolver = defaultResolver
} else {
d.resolver, err = customAuthorityResolver(target.Authority)
if err != nil {
return nil, err
}
}
d.wg.Add(1)
go d.watcher()
return d, nil
@ -121,6 +153,12 @@ func (b *dnsBuilder) Scheme() string {
return "dns"
}
type netResolver interface {
LookupHost(ctx context.Context, host string) (addrs []string, err error)
LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
LookupTXT(ctx context.Context, name string) (txts []string, err error)
}
// ipResolver watches for the name resolution update for an IP address.
type ipResolver struct {
cc resolver.ClientConn
@ -161,6 +199,7 @@ type dnsResolver struct {
retryCount int
host string
port string
resolver netResolver
ctx context.Context
cancel context.CancelFunc
cc resolver.ClientConn
@ -218,13 +257,13 @@ func (d *dnsResolver) watcher() {
func (d *dnsResolver) lookupSRV() []resolver.Address {
var newAddrs []resolver.Address
_, srvs, err := lookupSRV(d.ctx, "grpclb", "tcp", d.host)
_, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host)
if err != nil {
grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
return nil
}
for _, s := range srvs {
lbAddrs, err := lookupHost(d.ctx, s.Target)
lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target)
if err != nil {
grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err)
continue
@ -243,7 +282,7 @@ func (d *dnsResolver) lookupSRV() []resolver.Address {
}
func (d *dnsResolver) lookupTXT() string {
ss, err := lookupTXT(d.ctx, d.host)
ss, err := d.resolver.LookupTXT(d.ctx, d.host)
if err != nil {
grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
return ""
@ -263,7 +302,7 @@ func (d *dnsResolver) lookupTXT() string {
func (d *dnsResolver) lookupHost() []resolver.Address {
var newAddrs []resolver.Address
addrs, err := lookupHost(d.ctx, d.host)
addrs, err := d.resolver.LookupHost(d.ctx, d.host)
if err != nil {
grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
return nil
@ -305,16 +344,16 @@ func formatIP(addr string) (addrIP string, ok bool) {
return "[" + addr + "]", true
}
// parseTarget takes the user input target string, returns formatted host and port info.
// parseTarget takes the user input target string and default port, returns formatted host and port info.
// If target doesn't specify a port, set the port to be the defaultPort.
// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
// are stripped when setting the host.
// examples:
// target: "www.google.com" returns host: "www.google.com", port: "443"
// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
// target: ":80" returns host: "localhost", port: "80"
func parseTarget(target string) (host, port string, err error) {
// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443"
// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80"
// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443"
// target: ":80" defaultPort: "443" returns host: "localhost", port: "80"
func parseTarget(target, defaultPort string) (host, port string, err error) {
if target == "" {
return "", "", errMissingAddr
}
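
Editor's note: the custom-authority support added above means the dns scheme can name the DNS server to query in the authority part of the target. A minimal sketch, assuming the default dns resolver registration; both addresses are placeholders.

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Target layout: dns://<dns-server-authority>/<host>:<port>.
	// Here name resolution for backend.example.com is sent to 10.0.0.53:53
	// instead of the system resolver.
	conn, err := grpc.Dial("dns://10.0.0.53:53/backend.example.com:443",
		grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}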

View File

@ -1,35 +0,0 @@
// +build go1.6, !go1.8
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package dns
import (
"net"
"golang.org/x/net/context"
)
var (
lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) }
lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
return net.LookupSRV(service, proto, name)
}
lookupTXT = func(ctx context.Context, name string) ([]string, error) { return net.LookupTXT(name) }
)

View File

@ -1,29 +0,0 @@
// +build go1.8
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package dns
import "net"
var (
lookupHost = net.DefaultResolver.LookupHost
lookupSRV = net.DefaultResolver.LookupSRV
lookupTXT = net.DefaultResolver.LookupTXT
)

View File

@ -23,21 +23,23 @@ import (
"strings"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/resolver"
)
// ccResolverWrapper is a wrapper on top of cc for resolvers.
// It implements the resolver.ClientConn interface.
type ccResolverWrapper struct {
cc *ClientConn
resolver resolver.Resolver
addrCh chan []resolver.Address
scCh chan string
done chan struct{}
cc *ClientConn
resolver resolver.Resolver
addrCh chan []resolver.Address
scCh chan string
done chan struct{}
lastAddressesCount int
}
// split2 returns the values from strings.SplitN(s, sep, 2).
// If sep is not found, it returns ("", s, false) instead.
// If sep is not found, it returns ("", "", false) instead.
func split2(s, sep string) (string, string, bool) {
spl := strings.SplitN(s, sep, 2)
if len(spl) < 2 {
@ -91,44 +93,6 @@ func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
return ccr, nil
}
func (ccr *ccResolverWrapper) start() {
go ccr.watcher()
}
// watcher processes address updates and service config updates sequentially.
// Otherwise, we need to resolve possible races between address and service
// config (e.g. they specify different balancer types).
func (ccr *ccResolverWrapper) watcher() {
for {
select {
case <-ccr.done:
return
default:
}
select {
case addrs := <-ccr.addrCh:
select {
case <-ccr.done:
return
default:
}
grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
ccr.cc.handleResolvedAddrs(addrs, nil)
case sc := <-ccr.scCh:
select {
case <-ccr.done:
return
default:
}
grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
ccr.cc.handleServiceConfig(sc)
case <-ccr.done:
return
}
}
}
func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
ccr.resolver.ResolveNow(o)
}
@ -141,18 +105,51 @@ func (ccr *ccResolverWrapper) close() {
// NewAddress is called by the resolver implementation to send addresses to gRPC.
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
select {
case <-ccr.addrCh:
case <-ccr.done:
return
default:
}
ccr.addrCh <- addrs
grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
if channelz.IsOn() {
ccr.addChannelzTraceEvent(addrs)
}
ccr.cc.handleResolvedAddrs(addrs, nil)
}
// NewServiceConfig is called by the resolver implementation to send service
// configs to gRPC.
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
select {
case <-ccr.scCh:
case <-ccr.done:
return
default:
}
ccr.scCh <- sc
grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
ccr.cc.handleServiceConfig(sc)
}
func (ccr *ccResolverWrapper) addChannelzTraceEvent(addrs []resolver.Address) {
if len(addrs) == 0 && ccr.lastAddressesCount != 0 {
channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
Desc: "Resolver returns an empty address list",
Severity: channelz.CtWarning,
})
} else if len(addrs) != 0 && ccr.lastAddressesCount == 0 {
var s string
for i, a := range addrs {
if a.ServerName != "" {
s += a.Addr + "(" + a.ServerName + ")"
} else {
s += a.Addr
}
if i != len(addrs)-1 {
s += " "
}
}
channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Resolver returns a non-empty address list (previous one was empty) %q", s),
Severity: channelz.CtINFO,
})
}
ccr.lastAddressesCount = len(addrs)
}

View File

@ -21,6 +21,7 @@ package grpc
import (
"bytes"
"compress/gzip"
"context"
"encoding/binary"
"fmt"
"io"
@ -31,7 +32,6 @@ import (
"sync"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/encoding"
@ -155,14 +155,12 @@ func (d *gzipDecompressor) Type() string {
type callInfo struct {
compressorType string
failFast bool
stream *clientStream
traceInfo traceInfo // in trace.go
stream ClientStream
maxReceiveMessageSize *int
maxSendMessageSize *int
creds credentials.PerRPCCredentials
contentSubtype string
codec baseCodec
disableRetry bool
maxRetryRPCBufferSize int
}
@ -255,8 +253,8 @@ func (o PeerCallOption) after(c *callInfo) {
}
}
// FailFast configures the action to take when an RPC is attempted on broken
// connections or unreachable servers. If failFast is true, the RPC will fail
// WaitForReady configures the action to take when an RPC is attempted on broken
// connections or unreachable servers. If waitForReady is false, the RPC will fail
// immediately. Otherwise, the RPC client will block the call until a
// connection is available (or the call is canceled or times out) and will
// retry the call if it fails due to a transient error. gRPC will not retry if
@ -264,7 +262,14 @@ func (o PeerCallOption) after(c *callInfo) {
// the data. Please refer to
// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
//
// By default, RPCs are "Fail Fast".
// By default, RPCs don't "wait for ready".
func WaitForReady(waitForReady bool) CallOption {
return FailFastCallOption{FailFast: !waitForReady}
}
// FailFast is the opposite of WaitForReady.
//
// Deprecated: use WaitForReady.
func FailFast(failFast bool) CallOption {
return FailFastCallOption{FailFast: failFast}
}
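
Editor's note: the new WaitForReady option above replaces FailFast with clearer polarity. A short sketch of typical usage via a connection-wide default call option; the address is a placeholder and WithInsecure keeps the example short.

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Equivalent to the old FailFast(false): RPCs block until a connection
	// is available instead of failing immediately.
	conn, err := grpc.Dial("backend.example.com:443",
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}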
@ -533,7 +538,10 @@ func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte,
}
cbuf := &bytes.Buffer{}
if compressor != nil {
z, _ := compressor.Compress(cbuf)
z, err := compressor.Compress(cbuf)
if err != nil {
return nil, wrapErr(err)
}
if _, err := z.Write(in); err != nil {
return nil, wrapErr(err)
}
@ -597,20 +605,22 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
return nil
}
// For the two compressor parameters, both should not be set, but if they are,
// dc takes precedence over compressor.
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error {
type payloadInfo struct {
wireLength int // The compressed length got from wire.
uncompressedBytes []byte
}
func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) {
pf, d, err := p.recvMsg(maxReceiveMessageSize)
if err != nil {
return err
return nil, err
}
if inPayload != nil {
inPayload.WireLength = len(d)
if payInfo != nil {
payInfo.wireLength = len(d)
}
if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
return st.Err()
return nil, st.Err()
}
if pf == compressionMade {
@ -619,33 +629,40 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf
if dc != nil {
d, err = dc.Do(bytes.NewReader(d))
if err != nil {
return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
}
} else {
dcReader, err := compressor.Decompress(bytes.NewReader(d))
if err != nil {
return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
}
d, err = ioutil.ReadAll(dcReader)
if err != nil {
return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
}
}
}
if len(d) > maxReceiveMessageSize {
// TODO: Revisit the error code. Currently keep it consistent with java
// implementation.
return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
}
return d, nil
}
// For the two compressor parameters, both should not be set, but if they are,
// dc takes precedence over compressor.
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
if err != nil {
return err
}
if err := c.Unmarshal(d, m); err != nil {
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
}
if inPayload != nil {
inPayload.RecvTime = time.Now()
inPayload.Payload = m
// TODO truncate large payload.
inPayload.Data = d
inPayload.Length = len(d)
if payInfo != nil {
payInfo.uncompressedBytes = d
}
return nil
}
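
Two user-visible knobs map onto this receive path: the compressor selected for a call determines which branch decompresses the payload, and maxReceiveMessageSize is what the ResourceExhausted check above enforces. A small sketch, with the address and the 16 MiB limit chosen only for illustration:

package main

import (
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" encoding.Compressor
)

func main() {
	// Client side: raise the per-message receive limit (the default is 4 MiB);
	// larger messages are rejected with codes.ResourceExhausted.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(
			grpc.MaxCallRecvMsgSize(16*1024*1024),
			grpc.UseCompressor("gzip"), // compressed payloads take the new-style decompression path above
		),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// Server side: the equivalent receive limit is a server option.
	srv := grpc.NewServer(grpc.MaxRecvMsgSize(16 * 1024 * 1024))
	_ = srv
}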
@ -668,23 +685,17 @@ func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
// Code returns the error code for err if it was produced by the rpc system.
// Otherwise, it returns codes.Unknown.
//
// Deprecated: use status.FromError and Code method instead.
// Deprecated: use status.Code instead.
func Code(err error) codes.Code {
if s, ok := status.FromError(err); ok {
return s.Code()
}
return codes.Unknown
return status.Code(err)
}
// ErrorDesc returns the error description of err if it was produced by the rpc system.
// Otherwise, it returns err.Error() or empty string when err is nil.
//
// Deprecated: use status.FromError and Message method instead.
// Deprecated: use status.Convert and Message method instead.
func ErrorDesc(err error) string {
if s, ok := status.FromError(err); ok {
return s.Message()
}
return err.Error()
return status.Convert(err).Message()
}
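
A short sketch of the migration these deprecation notes point at, using only the status package (the NotFound check is just an example condition):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// inspectRPCError uses the replacements suggested above: status.Code instead
// of grpc.Code, and status.Convert(...).Message() instead of grpc.ErrorDesc.
func inspectRPCError(err error) {
	// status.Code returns codes.OK for a nil error and codes.Unknown for
	// errors that did not come from the rpc system.
	if status.Code(err) == codes.NotFound {
		fmt.Println("resource is missing")
	}
	fmt.Println("description:", status.Convert(err).Message())
}

func main() {
	inspectRPCError(status.Error(codes.NotFound, "no such widget"))
}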
// Errorf returns an error containing an error code and a description;
@ -695,6 +706,31 @@ func Errorf(c codes.Code, format string, a ...interface{}) error {
return status.Errorf(c, format, a...)
}
// toRPCErr converts an error into an error from the status package.
func toRPCErr(err error) error {
if err == nil || err == io.EOF {
return err
}
if err == io.ErrUnexpectedEOF {
return status.Error(codes.Internal, err.Error())
}
if _, ok := status.FromError(err); ok {
return err
}
switch e := err.(type) {
case transport.ConnectionError:
return status.Error(codes.Unavailable, e.Desc)
default:
switch err {
case context.DeadlineExceeded:
return status.Error(codes.DeadlineExceeded, err.Error())
case context.Canceled:
return status.Error(codes.Canceled, err.Error())
}
}
return status.Error(codes.Unknown, err.Error())
}
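
From a caller's point of view, this mapping means every error returned by an RPC can be inspected through the status package. A hedged sketch of error classification built on those conversions (the retry advice in the comments is a rule of thumb, not part of the library):

package rpcerrs

import (
	"log"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// classify switches on the status code produced by conversions like toRPCErr:
// context expiry becomes DeadlineExceeded/Canceled, transport.ConnectionError
// becomes Unavailable, and io.ErrUnexpectedEOF becomes Internal.
func classify(err error) {
	switch status.Code(err) {
	case codes.OK:
		// nil error: the RPC succeeded.
	case codes.DeadlineExceeded, codes.Canceled:
		log.Println("context expired or was canceled:", err)
	case codes.Unavailable:
		log.Println("connection error, often transient:", err)
	case codes.Internal:
		log.Println("protocol-level failure (e.g. unexpected EOF):", err)
	default:
		log.Println("RPC failed:", err)
	}
}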
// setCallInfoCodec should only be called after CallOptions have been applied.
func setCallInfoCodec(c *callInfo) error {
if c.codec != nil {
@ -750,6 +786,19 @@ func parseDialTarget(target string) (net string, addr string) {
return net, target
}
// channelzData is used to store channelz related data for ClientConn, addrConn and Server.
// These fields cannot be embedded directly in the original structs (e.g. ClientConn), because
// atomic operations on int64 variables require the caller to enforce 64-bit memory alignment
// on 32-bit machines. Grouping the int64 fields inside their own struct enforces that alignment.
type channelzData struct {
callsStarted int64
callsFailed int64
callsSucceeded int64
// lastCallStartedTime stores the timestamp of the most recent call start. It is an int64 rather
// than a time.Time because atomically updating a time.Time is more costly than updating an int64.
lastCallStartedTime int64
}
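
The same alignment trick can be reused outside gRPC; a minimal sketch, under the assumption that the counters are the only fields touched atomically:

package counters

import "sync/atomic"

// callCounters mirrors the channelzData pattern: only int64 fields, allocated
// as a separate struct, so sync/atomic's 64-bit alignment requirement holds
// even on 32-bit platforms (the first word of an allocated struct is 64-bit
// aligned, and all-int64 fields keep every later field aligned too).
type callCounters struct {
	started   int64
	failed    int64
	succeeded int64
}

type service struct {
	name string // non-atomic fields can live here without alignment concerns
	czd  *callCounters
}

func newService(name string) *service {
	return &service{name: name, czd: new(callCounters)}
}

func (s *service) recordCall(ok bool) {
	atomic.AddInt64(&s.czd.started, 1)
	if ok {
		atomic.AddInt64(&s.czd.succeeded, 1)
	} else {
		atomic.AddInt64(&s.czd.failed, 1)
	}
}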
// The SupportPackageIsVersion variables are referenced from generated protocol
// buffer files to ensure compatibility with the gRPC version used. The latest
// support package version is 5.
