diff --git a/go.mod b/go.mod
index a98b6572c..c804b9555 100644
--- a/go.mod
+++ b/go.mod
@@ -2,7 +2,6 @@ module github.com/hashicorp/terraform
require (
cloud.google.com/go v0.15.0
- contrib.go.opencensus.io/exporter/stackdriver v0.6.0 // indirect
github.com/Azure/azure-sdk-for-go v10.3.0-beta+incompatible
github.com/Azure/go-autorest v8.3.1+incompatible
github.com/Azure/go-ntlmssp v0.0.0-20170803034930-c92175d54006 // indirect
@@ -20,6 +19,7 @@ require (
github.com/armon/go-radix v0.0.0-20160115234725-4239b77079c7 // indirect
github.com/aws/aws-sdk-go v1.14.31
github.com/beevik/etree v0.0.0-20171015221209-af219c0c7ea1 // indirect
+ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
github.com/bgentry/speakeasy v0.0.0-20161015143505-675b82c74c0e // indirect
github.com/blang/semver v0.0.0-20170202183821-4a1e882c79dc
@@ -90,6 +90,7 @@ require (
github.com/mattn/go-colorable v0.0.0-20160220075935-9cbef7c35391
github.com/mattn/go-isatty v0.0.0-20161123143637-30a891c33c7c // indirect
github.com/mattn/go-shellwords v1.0.1
+ github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/miekg/dns v1.0.8 // indirect
github.com/mitchellh/cli v0.0.0-20171129193617-33edc47170b5
github.com/mitchellh/colorstring v0.0.0-20150917214807-8631ce90f286
@@ -109,9 +110,10 @@ require (
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c // indirect
github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17 // indirect
github.com/posener/complete v0.0.0-20171219111128-6bee943216c8
- github.com/prometheus/client_golang v0.9.0 // indirect
- github.com/prometheus/common v0.0.0-20181015124227-bcb74de08d37 // indirect
- github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d // indirect
+ github.com/prometheus/client_golang v0.8.0 // indirect
+ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 // indirect
+ github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e // indirect
+ github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 // indirect
github.com/satori/go.uuid v0.0.0-20160927100844-b061729afc07 // indirect
github.com/satori/uuid v0.0.0-20160927100844-b061729afc07 // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
@@ -128,13 +130,15 @@ require (
github.com/xanzy/ssh-agent v0.1.0
github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 // indirect
github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557
- github.com/zclconf/go-cty v0.0.0-20180907002636-07dee8a1cfd4
- go.opencensus.io v0.17.0 // indirect
- golang.org/x/crypto v0.0.0-20180816225734-aabede6cba87
- golang.org/x/net v0.0.0-20180906233101-161cd47e91fd
- golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
+ github.com/zclconf/go-cty v0.0.0-20180925180032-d9b87d891d0b
+ golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b
+ golang.org/x/net v0.0.0-20180925072008-f04abc6bdfa7
+ golang.org/x/oauth2 v0.0.0-20170928010508-bb50c06baba3
+ golang.org/x/sys v0.0.0-20180925112736-b09afc3d579e // indirect
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 // indirect
- google.golang.org/api v0.0.0-20180921000521-920bb1beccf7
+ google.golang.org/api v0.0.0-20171005000305-7a7376eff6a5
+ google.golang.org/appengine v1.2.0 // indirect
+ google.golang.org/genproto v0.0.0-20171002232614-f676e0f3ac63 // indirect
google.golang.org/grpc v1.14.0
gopkg.in/vmihailenco/msgpack.v2 v2.9.1 // indirect
)
diff --git a/go.sum b/go.sum
index d1acb8316..d4329a7df 100644
--- a/go.sum
+++ b/go.sum
@@ -1,8 +1,5 @@
cloud.google.com/go v0.15.0 h1:/e2wXYguItvFu4fJCvhMRPIwwrimuUxI+aCVx/ahLjg=
cloud.google.com/go v0.15.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-contrib.go.opencensus.io/exporter/stackdriver v0.6.0 h1:U0FQWsZU3aO8W+BrZc88T8fdd24qe3Phawa9V9oaVUE=
-contrib.go.opencensus.io/exporter/stackdriver v0.6.0/go.mod h1:QeFzMJDAw8TXt5+aRaSuE8l5BwaMIOIlaVkBOPRuMuw=
-git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/Azure/azure-sdk-for-go v10.3.0-beta+incompatible h1:TP+nmGmOP7psi7CvIq/1pCliRBRj73vmMTDjaPrTnr8=
github.com/Azure/azure-sdk-for-go v10.3.0-beta+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-autorest v8.3.1+incompatible h1:1+jMCOJcCh3GmI7FGJVOo8AlfPWDyjS7fLbbkZGzEGY=
@@ -233,7 +230,6 @@ github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U=
github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
-github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58 h1:m3CEgv3ah1Rhy82L+c0QG/U3VyY1UsvsIdkh0/rU97Y=
github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=
@@ -244,17 +240,14 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v0.0.0-20171219111128-6bee943216c8 h1:lcb1zvdlaZyEbl2OXifN3uOYYyIvllofUbmp9bwbL+0=
github.com/posener/complete v0.0.0-20171219111128-6bee943216c8/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/prometheus/client_golang v0.8.0 h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.0 h1:tXuTFVHC03mW0D+Ua1Q2d1EAVqLTuggX50V0VLICCzY=
-github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20181015124227-bcb74de08d37 h1:Y7YdJ9Xb3MoQOzAWXnDunAJYpvhVwZdTirNfGUgPKaA=
-github.com/prometheus/common v0.0.0-20181015124227-bcb74de08d37/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFdaDqxJVlbOQ1DtGmZWs/Qau0hIlk+WQ=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/satori/go.uuid v0.0.0-20160927100844-b061729afc07 h1:DEZDfcCVq3xDJrjqdCgyN/dHYVoqR92MCsdqCdxmnhM=
github.com/satori/go.uuid v0.0.0-20160927100844-b061729afc07/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/satori/uuid v0.0.0-20160927100844-b061729afc07 h1:81vvGlnI/AZ1/TxGDirw3ofUoS64TyjmPQt5C9XODTw=
@@ -295,35 +288,37 @@ github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q
github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557 h1:Jpn2j6wHkC9wJv5iMfJhKqrZJx3TahFx+7sbZ7zQdxs=
github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/zclconf/go-cty v0.0.0-20180815031001-58bb2bc0302a/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s=
-github.com/zclconf/go-cty v0.0.0-20180907002636-07dee8a1cfd4 h1:C02D0gjAVFMKqFUaZvaZK2YWGK1HAQwVTZWDAENYDjA=
-github.com/zclconf/go-cty v0.0.0-20180907002636-07dee8a1cfd4/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s=
-go.opencensus.io v0.17.0 h1:2Cu88MYg+1LU+WVD+NWwYhyP0kKgRlN9QjWGaX0jKTE=
-go.opencensus.io v0.17.0/go.mod h1:mp1VrMQxhlqqDpKvH4UcQUa4YwlzNmymAjPrDdfxNpI=
+github.com/zclconf/go-cty v0.0.0-20180925180032-d9b87d891d0b h1:9rQAtgrPBuyPjmPEcx4pqJs6D+u41FYbbVE/hhdsrtk=
+github.com/zclconf/go-cty v0.0.0-20180925180032-d9b87d891d0b/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s=
golang.org/x/crypto v0.0.0-20180816225734-aabede6cba87 h1:gCHhzI+1R9peHIMyiWVxoVaWlk1cYK7VThX5ptLtbXY=
golang.org/x/crypto v0.0.0-20180816225734-aabede6cba87/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b h1:2b9XGzhjiYsYPnKXoEfL7klWZQIt8IfyRCz62gCqqlQ=
+golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180811021610-c39426892332 h1:efGso+ep0DjyCBJPjvoz0HI6UldX4Md2F1rZFe1ir0E=
golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/net v0.0.0-20180925072008-f04abc6bdfa7 h1:zKzVgSQ8WOSHzD7I4k8LQjrHUUCNOlBsgc0PcYLVNnY=
+golang.org/x/net v0.0.0-20180925072008-f04abc6bdfa7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/oauth2 v0.0.0-20170928010508-bb50c06baba3 h1:YGx0PRKSN/2n/OcdFycCC0JUA/Ln+i5lPcN8VoNDus0=
+golang.org/x/oauth2 v0.0.0-20170928010508-bb50c06baba3/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180816055513-1c9583448a9c h1:uHnKXcvx6SNkuwC+nrzxkJ+TpPwZOtumbhWrrOYN5YA=
golang.org/x/sys v0.0.0-20180816055513-1c9583448a9c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180925112736-b09afc3d579e h1:LSlw/Dbj0MkNvPYAAkGinYmGliq+aqS7eKPYlE4oWC4=
+golang.org/x/sys v0.0.0-20180925112736-b09afc3d579e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
-google.golang.org/api v0.0.0-20180921000521-920bb1beccf7 h1:XKT3Wlpn+o6Car1ot74Z4R+R9CeRfITCLZb0Q9/mpx4=
-google.golang.org/api v0.0.0-20180921000521-920bb1beccf7/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.0.0-20171005000305-7a7376eff6a5 h1:PDkJGYjSvxJyevtZRGmBSO+HjbIKuqYEEc8gB51or4o=
+google.golang.org/api v0.0.0-20171005000305-7a7376eff6a5/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw=
-google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/appengine v1.2.0 h1:S0iUepdCWODXRvtE+gcRDd15L+k+k1AiHlMiMjefH24=
+google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20171002232614-f676e0f3ac63 h1:yNBw5bwywOTguAu+h6SkCUaWdEZ7ZXgfiwb2YTN1eQw=
+google.golang.org/genproto v0.0.0-20171002232614-f676e0f3ac63/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS b/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS
deleted file mode 100644
index e491a9e7f..000000000
--- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS
+++ /dev/null
@@ -1 +0,0 @@
-Google Inc.
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE b/vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE
deleted file mode 100644
index d64569567..000000000
--- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/propagation/http.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/propagation/http.go
deleted file mode 100644
index 1797d3726..000000000
--- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/propagation/http.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package propagation implement X-Cloud-Trace-Context header propagation used
-// by Google Cloud products.
-package propagation // import "contrib.go.opencensus.io/exporter/stackdriver/propagation"
-
-import (
- "encoding/binary"
- "encoding/hex"
- "fmt"
- "net/http"
- "strconv"
- "strings"
-
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
-)
-
-const (
- httpHeaderMaxSize = 200
- httpHeader = `X-Cloud-Trace-Context`
-)
-
-var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
-
-// HTTPFormat implements propagation.HTTPFormat to propagate
-// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace.
-type HTTPFormat struct{}
-
-// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests.
-func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
- h := req.Header.Get(httpHeader)
- // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat.
- // Return if the header is empty or missing, or if the header is unreasonably
- // large, to avoid making unnecessary copies of a large string.
- if h == "" || len(h) > httpHeaderMaxSize {
- return trace.SpanContext{}, false
- }
-
- // Parse the trace id field.
- slash := strings.Index(h, `/`)
- if slash == -1 {
- return trace.SpanContext{}, false
- }
- tid, h := h[:slash], h[slash+1:]
-
- buf, err := hex.DecodeString(tid)
- if err != nil {
- return trace.SpanContext{}, false
- }
- copy(sc.TraceID[:], buf)
-
- // Parse the span id field.
- spanstr := h
- semicolon := strings.Index(h, `;`)
- if semicolon != -1 {
- spanstr, h = h[:semicolon], h[semicolon+1:]
- }
- sid, err := strconv.ParseUint(spanstr, 10, 64)
- if err != nil {
- return trace.SpanContext{}, false
- }
- binary.BigEndian.PutUint64(sc.SpanID[:], sid)
-
- // Parse the options field, options field is optional.
- if !strings.HasPrefix(h, "o=") {
- return sc, true
- }
- o, err := strconv.ParseUint(h[2:], 10, 64)
- if err != nil {
- return trace.SpanContext{}, false
- }
- sc.TraceOptions = trace.TraceOptions(o)
- return sc, true
-}
-
-// SpanContextToRequest modifies the given request to include a Stackdriver Trace header.
-func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
- sid := binary.BigEndian.Uint64(sc.SpanID[:])
- header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions))
- req.Header.Set(httpHeader, header)
-}
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go
new file mode 100644
index 000000000..aa15b7bde
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go
@@ -0,0 +1,385 @@
+package stdlib
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/zclconf/go-cty/cty"
+ "github.com/zclconf/go-cty/cty/function"
+)
+
+var FormatDateFunc = function.New(&function.Spec{
+ Params: []function.Parameter{
+ {
+ Name: "format",
+ Type: cty.String,
+ },
+ {
+ Name: "time",
+ Type: cty.String,
+ },
+ },
+ Type: function.StaticReturnType(cty.String),
+ Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+ formatStr := args[0].AsString()
+ timeStr := args[1].AsString()
+ t, err := parseTimestamp(timeStr)
+ if err != nil {
+ return cty.DynamicVal, function.NewArgError(1, err)
+ }
+
+ var buf bytes.Buffer
+ sc := bufio.NewScanner(strings.NewReader(formatStr))
+ sc.Split(splitDateFormat)
+ const esc = '\''
+ for sc.Scan() {
+ tok := sc.Bytes()
+
+ // The leading byte signals the token type
+ switch {
+ case tok[0] == esc:
+ if tok[len(tok)-1] != esc || len(tok) == 1 {
+ return cty.DynamicVal, function.NewArgErrorf(0, "unterminated literal '")
+ }
+ if len(tok) == 2 {
+ // Must be a single escaped quote, ''
+ buf.WriteByte(esc)
+ } else {
+ // The content (until a closing esc) is printed out verbatim
+ // except that we must un-double any double-esc escapes in
+ // the middle of the string.
+ raw := tok[1 : len(tok)-1]
+ for i := 0; i < len(raw); i++ {
+ buf.WriteByte(raw[i])
+ if raw[i] == esc {
+ i++ // skip the escaped quote
+ }
+ }
+ }
+
+ case startsDateFormatVerb(tok[0]):
+ switch tok[0] {
+ case 'Y':
+ y := t.Year()
+ switch len(tok) {
+ case 2:
+ fmt.Fprintf(&buf, "%02d", y%100)
+ case 4:
+ fmt.Fprintf(&buf, "%04d", y)
+ default:
+ return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: year must either be \"YY\" or \"YYYY\"", tok)
+ }
+ case 'M':
+ m := t.Month()
+ switch len(tok) {
+ case 1:
+ fmt.Fprintf(&buf, "%d", m)
+ case 2:
+ fmt.Fprintf(&buf, "%02d", m)
+ case 3:
+ buf.WriteString(m.String()[:3])
+ case 4:
+ buf.WriteString(m.String())
+ default:
+ return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: month must be \"M\", \"MM\", \"MMM\", or \"MMMM\"", tok)
+ }
+ case 'D':
+ d := t.Day()
+ switch len(tok) {
+ case 1:
+ fmt.Fprintf(&buf, "%d", d)
+ case 2:
+ fmt.Fprintf(&buf, "%02d", d)
+ default:
+ return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: day of month must either be \"D\" or \"DD\"", tok)
+ }
+ case 'E':
+ d := t.Weekday()
+ switch len(tok) {
+ case 3:
+ buf.WriteString(d.String()[:3])
+ case 4:
+ buf.WriteString(d.String())
+ default:
+ return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: day of week must either be \"EEE\" or \"EEEE\"", tok)
+ }
+ case 'h':
+ h := t.Hour()
+ switch len(tok) {
+ case 1:
+ fmt.Fprintf(&buf, "%d", h)
+ case 2:
+ fmt.Fprintf(&buf, "%02d", h)
+ default:
+ return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: 24-hour must either be \"h\" or \"hh\"", tok)
+ }
+ case 'H':
+ h := t.Hour() % 12
+ if h == 0 {
+ h = 12
+ }
+ switch len(tok) {
+ case 1:
+ fmt.Fprintf(&buf, "%d", h)
+ case 2:
+ fmt.Fprintf(&buf, "%02d", h)
+ default:
+ return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: 12-hour must either be \"H\" or \"HH\"", tok)
+ }
+ case 'A', 'a':
+ if len(tok) != 2 {
+ return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: must be \"%s%s\"", tok, tok[0:1], tok[0:1])
+ }
+ upper := tok[0] == 'A'
+ switch t.Hour() / 12 {
+ case 0:
+ if upper {
+ buf.WriteString("AM")
+ } else {
+ buf.WriteString("am")
+ }
+ case 1:
+ if upper {
+ buf.WriteString("PM")
+ } else {
+ buf.WriteString("pm")
+ }
+ }
+ case 'm':
+ m := t.Minute()
+ switch len(tok) {
+ case 1:
+ fmt.Fprintf(&buf, "%d", m)
+ case 2:
+ fmt.Fprintf(&buf, "%02d", m)
+ default:
+ return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: minute must either be \"m\" or \"mm\"", tok)
+ }
+ case 's':
+ s := t.Second()
+ switch len(tok) {
+ case 1:
+ fmt.Fprintf(&buf, "%d", s)
+ case 2:
+ fmt.Fprintf(&buf, "%02d", s)
+ default:
+ return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: second must either be \"s\" or \"ss\"", tok)
+ }
+ case 'Z':
+ // We'll just lean on Go's own formatter for this one, since
+ // the necessary information is unexported.
+ switch len(tok) {
+ case 1:
+ buf.WriteString(t.Format("Z07:00"))
+ case 3:
+ str := t.Format("-0700")
+ switch str {
+ case "+0000":
+ buf.WriteString("UTC")
+ default:
+ buf.WriteString(str)
+ }
+ case 4:
+ buf.WriteString(t.Format("-0700"))
+ case 5:
+ buf.WriteString(t.Format("-07:00"))
+ default:
+ return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: timezone must be Z, ZZZZ, or ZZZZZ", tok)
+ }
+ default:
+ return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q", tok)
+ }
+
+ default:
+ // Any other starting character indicates a literal sequence
+ buf.Write(tok)
+ }
+ }
+
+ return cty.StringVal(buf.String()), nil
+ },
+})
+
+// FormatDate reformats a timestamp given in RFC3339 syntax into another time
+// syntax defined by a given format string.
+//
+// The format string uses letter mnemonics to represent portions of the
+// timestamp, with repetition signifying length variants of each portion.
+// Single quote characters ' can be used to quote sequences of literal letters
+// that should not be interpreted as formatting mnemonics.
+//
+// The full set of supported mnemonic sequences is listed below:
+//
+// YY Year modulo 100 zero-padded to two digits, like "06".
+// YYYY Four (or more) digit year, like "2006".
+// M Month number, like "1" for January.
+// MM Month number zero-padded to two digits, like "01".
+// MMM English month name abbreviated to three letters, like "Jan".
+// MMMM English month name unabbreviated, like "January".
+// D Day of month number, like "2".
+// DD Day of month number zero-padded to two digits, like "02".
+// EEE English day of week name abbreviated to three letters, like "Mon".
+// EEEE English day of week name unabbreviated, like "Monday".
+// h 24-hour number, like "2".
+// hh 24-hour number zero-padded to two digits, like "02".
+// H 12-hour number, like "2".
+// HH 12-hour number zero-padded to two digits, like "02".
+// AA Hour AM/PM marker in uppercase, like "AM".
+// aa Hour AM/PM marker in lowercase, like "am".
+// m Minute within hour, like "5".
+// mm Minute within hour zero-padded to two digits, like "05".
+// s Second within minute, like "9".
+// ss Second within minute zero-padded to two digits, like "09".
+// ZZZZ Timezone offset with just sign and digit, like "-0800".
+// ZZZZZ Timezone offset with colon separating hours and minutes, like "-08:00".
+// Z Like ZZZZZ but with a special case "Z" for UTC.
+// ZZZ Like ZZZZ but with a special case "UTC" for UTC.
+//
+// The format syntax is optimized mainly for generating machine-oriented
+// timestamps rather than human-oriented timestamps; the English language
+// portions of the output reflect the use of English names in a number of
+// machine-readable date formatting standards. For presentation to humans,
+// a locale-aware time formatter (not included in this package) is a better
+// choice.
+//
+// The format syntax is not compatible with that of any other language, but
+// is optimized so that patterns for common standard date formats can be
+// recognized quickly even by a reader unfamiliar with the format syntax.
+func FormatDate(format cty.Value, timestamp cty.Value) (cty.Value, error) {
+ return FormatDateFunc.Call([]cty.Value{format, timestamp})
+}
+
+func parseTimestamp(ts string) (time.Time, error) {
+ t, err := time.Parse(time.RFC3339, ts)
+ if err != nil {
+ switch err := err.(type) {
+ case *time.ParseError:
+ // If err is a time.ParseError then its string representation is not
+ // appropriate since it relies on details of Go's strange date format
+ // representation, which a caller of our functions is not expected
+ // to be familiar with.
+ //
+ // Therefore we do some light transformation to get a more suitable
+ // error that should make more sense to our callers. These are
+ // still not awesome error messages, but at least they refer to
+ // the timestamp portions by name rather than by Go's example
+ // values.
+ if err.LayoutElem == "" && err.ValueElem == "" && err.Message != "" {
+ // For some reason err.Message is populated with a ": " prefix
+ // by the time package.
+ return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp%s", err.Message)
+ }
+ var what string
+ switch err.LayoutElem {
+ case "2006":
+ what = "year"
+ case "01":
+ what = "month"
+ case "02":
+ what = "day of month"
+ case "15":
+ what = "hour"
+ case "04":
+ what = "minute"
+ case "05":
+ what = "second"
+ case "Z07:00":
+ what = "UTC offset"
+ case "T":
+ return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: missing required time introducer 'T'")
+ case ":", "-":
+ if err.ValueElem == "" {
+ return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string where %q is expected", err.LayoutElem)
+ } else {
+ return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: found %q where %q is expected", err.ValueElem, err.LayoutElem)
+ }
+ default:
+ // Should never get here, because time.RFC3339 includes only the
+ // above portions, but since that might change in future we'll
+ // be robust here.
+ what = "timestamp segment"
+ }
+ if err.ValueElem == "" {
+ return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string before %s", what)
+ } else {
+ return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: cannot use %q as %s", err.ValueElem, what)
+ }
+ }
+ return time.Time{}, err
+ }
+ return t, nil
+}
+
+// splitDateFormat is a bufio.SplitFunc used to tokenize a date format.
+func splitDateFormat(data []byte, atEOF bool) (advance int, token []byte, err error) {
+ if len(data) == 0 {
+ return 0, nil, nil
+ }
+
+ const esc = '\''
+
+ switch {
+
+ case data[0] == esc:
+ // If we have another quote immediately after then this is a single
+ // escaped escape.
+ if len(data) > 1 && data[1] == esc {
+ return 2, data[:2], nil
+ }
+
+ // Beginning of quoted sequence, so we will seek forward until we find
+ // the closing quote, ignoring escaped quotes along the way.
+ for i := 1; i < len(data); i++ {
+ if data[i] == esc {
+ if (i + 1) == len(data) {
+ // We need at least one more byte to decide if this is an
+ // escape or a terminator.
+ return 0, nil, nil
+ }
+ if data[i+1] == esc {
+ i++ // doubled-up quotes are an escape sequence
+ continue
+ }
+ // We've found the closing quote
+ return i + 1, data[:i+1], nil
+ }
+ }
+ // If we fall out here then we need more bytes to find the end,
+ // unless we're already at the end with an unclosed quote.
+ if atEOF {
+ return len(data), data, nil
+ }
+ return 0, nil, nil
+
+ case startsDateFormatVerb(data[0]):
+ rep := data[0]
+ for i := 1; i < len(data); i++ {
+ if data[i] != rep {
+ return i, data[:i], nil
+ }
+ }
+ if atEOF {
+ return len(data), data, nil
+ }
+ // We need more data to decide if we've found the end
+ return 0, nil, nil
+
+ default:
+ for i := 1; i < len(data); i++ {
+ if data[i] == esc || startsDateFormatVerb(data[i]) {
+ return i, data[:i], nil
+ }
+ }
+ // We might not actually be at the end of a literal sequence,
+ // but that doesn't matter since we'll concat them back together
+ // anyway.
+ return len(data), data, nil
+ }
+}
+
+func startsDateFormatVerb(b byte) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z')
+}
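Note (illustration, not part of the patch): the FormatDate function added above takes a mnemonic format string and an RFC3339 timestamp, both as cty string values, with the mnemonics documented in the function's doc comment. A minimal usage sketch, assuming the updated go-cty revision is available on the module path:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// "DD MMM YYYY hh:mm ZZZ" renders the timestamp below as "02 Jan 2018 21:00 -0800";
	// the spaces and the colon are not format verbs, so they pass through as literals.
	v, err := stdlib.FormatDate(
		cty.StringVal("DD MMM YYYY hh:mm ZZZ"),
		cty.StringVal("2018-01-02T21:00:57-08:00"),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.AsString())
}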
diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go
index b14e28730..16e1c0932 100644
--- a/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go
+++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go
@@ -138,7 +138,10 @@ func unmarshalList(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value
return cty.DynamicVal, path.NewErrorf("a list is required")
}
- if length == 0 {
+ switch {
+ case length < 0:
+ return cty.NullVal(cty.List(ety)), nil
+ case length == 0:
return cty.ListValEmpty(ety), nil
}
@@ -166,7 +169,10 @@ func unmarshalSet(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value,
return cty.DynamicVal, path.NewErrorf("a set is required")
}
- if length == 0 {
+ switch {
+ case length < 0:
+ return cty.NullVal(cty.Set(ety)), nil
+ case length == 0:
return cty.SetValEmpty(ety), nil
}
@@ -194,7 +200,10 @@ func unmarshalMap(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value,
return cty.DynamicVal, path.NewErrorf("a map is required")
}
- if length == 0 {
+ switch {
+ case length < 0:
+ return cty.NullVal(cty.Map(ety)), nil
+ case length == 0:
return cty.MapValEmpty(ety), nil
}
@@ -227,7 +236,12 @@ func unmarshalTuple(dec *msgpack.Decoder, etys []cty.Type, path cty.Path) (cty.V
return cty.DynamicVal, path.NewErrorf("a tuple is required")
}
- if length != len(etys) {
+ switch {
+ case length < 0:
+ return cty.NullVal(cty.Tuple(etys)), nil
+ case length == 0:
+ return cty.TupleVal(nil), nil
+ case length != len(etys):
return cty.DynamicVal, path.NewErrorf("a tuple of length %d is required", len(etys))
}
@@ -256,7 +270,12 @@ func unmarshalObject(dec *msgpack.Decoder, atys map[string]cty.Type, path cty.Pa
return cty.DynamicVal, path.NewErrorf("an object is required")
}
- if length != len(atys) {
+ switch {
+ case length < 0:
+ return cty.NullVal(cty.Object(atys)), nil
+ case length == 0:
+ return cty.ObjectVal(nil), nil
+ case length != len(atys):
return cty.DynamicVal, path.NewErrorf("an object with %d attributes is required", len(atys))
}
@@ -293,7 +312,10 @@ func unmarshalDynamic(dec *msgpack.Decoder, path cty.Path) (cty.Value, error) {
return cty.DynamicVal, path.NewError(err)
}
- if length != 2 {
+ switch {
+ case length == -1:
+ return cty.NullVal(cty.DynamicPseudoType), nil
+ case length != 2:
return cty.DynamicVal, path.NewErrorf(
"dynamic value array must have exactly two elements",
)
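Note (illustration, not part of the patch): each hunk above handles the case where the msgpack decoder reports a negative length, which is how a nil (null) collection, tuple, object, or dynamic value arrives on the wire; the decoder now returns a typed null instead of falling through to an error. A rough sketch of the resulting behavior, assuming the package's top-level Marshal and Unmarshal helpers:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/msgpack"
)

func main() {
	ty := cty.List(cty.String)

	// A null list marshals to a msgpack nil; on decode that shows up as a
	// negative array length, which now maps back to a typed null value.
	b, err := msgpack.Marshal(cty.NullVal(ty), ty)
	if err != nil {
		panic(err)
	}
	v, err := msgpack.Unmarshal(b, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.IsNull()) // expected: true
}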
diff --git a/vendor/github.com/zclconf/go-cty/cty/value_ops.go b/vendor/github.com/zclconf/go-cty/cty/value_ops.go
index 2089fb324..9dcfdbf56 100644
--- a/vendor/github.com/zclconf/go-cty/cty/value_ops.go
+++ b/vendor/github.com/zclconf/go-cty/cty/value_ops.go
@@ -14,16 +14,15 @@ func (val Value) GoString() string {
return "cty.NilVal"
}
- if val.ty == DynamicPseudoType {
- return "cty.DynamicVal"
- }
-
- if !val.IsKnown() {
- return fmt.Sprintf("cty.UnknownVal(%#v)", val.ty)
- }
if val.IsNull() {
return fmt.Sprintf("cty.NullVal(%#v)", val.ty)
}
+ if val == DynamicVal { // is unknown, so must be before the IsKnown check below
+ return "cty.DynamicVal"
+ }
+ if !val.IsKnown() {
+ return fmt.Sprintf("cty.UnknownVal(%#v)", val.ty)
+ }
// By the time we reach here we've dealt with all of the exceptions around
// unknowns and nulls, so we're guaranteed that the values are the
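Note (illustration, not part of the patch): the reordering above matters because cty.DynamicVal is itself an unknown value, so it must be matched before the IsKnown check, and a null of cty.DynamicPseudoType should no longer be misreported as cty.DynamicVal. Expected GoString output under the new ordering, as a small sketch:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	fmt.Printf("%#v\n", cty.NullVal(cty.DynamicPseudoType)) // cty.NullVal(cty.DynamicPseudoType)
	fmt.Printf("%#v\n", cty.DynamicVal)                     // cty.DynamicVal
	fmt.Printf("%#v\n", cty.UnknownVal(cty.String))         // cty.UnknownVal(cty.String)
}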
diff --git a/vendor/go.opencensus.io/.gitignore b/vendor/go.opencensus.io/.gitignore
deleted file mode 100644
index 74a6db472..000000000
--- a/vendor/go.opencensus.io/.gitignore
+++ /dev/null
@@ -1,9 +0,0 @@
-/.idea/
-
-# go.opencensus.io/exporter/aws
-/exporter/aws/
-
-# Exclude vendor, use dep ensure after checkout:
-/vendor/github.com/
-/vendor/golang.org/
-/vendor/google.golang.org/
diff --git a/vendor/go.opencensus.io/.travis.yml b/vendor/go.opencensus.io/.travis.yml
deleted file mode 100644
index 2d6daa6b2..000000000
--- a/vendor/go.opencensus.io/.travis.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-language: go
-
-go:
- # 1.8 is tested by AppVeyor
- - 1.10.x
-
-go_import_path: go.opencensus.io
-
-# Don't email me the results of the test runs.
-notifications:
- email: false
-
-before_script:
- - GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any
- - PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any
- - curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh # Install latest dep release
- - go get github.com/rakyll/embedmd
-
-script:
- - embedmd -d README.md # Ensure embedded code is up-to-date
- - dep ensure -v
- - go build ./... # Ensure dependency updates don't break build
- - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi
- - go vet ./...
- - go test -v -race $PKGS # Run all the tests with the race detector enabled
- - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi'
- - go run internal/check/version.go
diff --git a/vendor/go.opencensus.io/AUTHORS b/vendor/go.opencensus.io/AUTHORS
deleted file mode 100644
index e491a9e7f..000000000
--- a/vendor/go.opencensus.io/AUTHORS
+++ /dev/null
@@ -1 +0,0 @@
-Google Inc.
diff --git a/vendor/go.opencensus.io/CONTRIBUTING.md b/vendor/go.opencensus.io/CONTRIBUTING.md
deleted file mode 100644
index 3f3aed396..000000000
--- a/vendor/go.opencensus.io/CONTRIBUTING.md
+++ /dev/null
@@ -1,56 +0,0 @@
-# How to contribute
-
-We'd love to accept your patches and contributions to this project. There are
-just a few small guidelines you need to follow.
-
-## Contributor License Agreement
-
-Contributions to this project must be accompanied by a Contributor License
-Agreement. You (or your employer) retain the copyright to your contribution,
-this simply gives us permission to use and redistribute your contributions as
-part of the project. Head over to to see
-your current agreements on file or to sign a new one.
-
-You generally only need to submit a CLA once, so if you've already submitted one
-(even if it was for a different project), you probably don't need to do it
-again.
-
-## Code reviews
-
-All submissions, including submissions by project members, require review. We
-use GitHub pull requests for this purpose. Consult [GitHub Help] for more
-information on using pull requests.
-
-[GitHub Help]: https://help.github.com/articles/about-pull-requests/
-
-## Instructions
-
-Fork the repo, checkout the upstream repo to your GOPATH by:
-
-```
-$ go get -d go.opencensus.io
-```
-
-Add your fork as an origin:
-
-```
-cd $(go env GOPATH)/src/go.opencensus.io
-git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opencensus-go.git
-```
-
-Run tests:
-
-```
-$ go test ./...
-```
-
-Checkout a new branch, make modifications and push the branch to your fork:
-
-```
-$ git checkout -b feature
-# edit files
-$ git commit
-$ git push fork feature
-```
-
-Open a pull request against the main opencensus-go repo.
diff --git a/vendor/go.opencensus.io/Gopkg.lock b/vendor/go.opencensus.io/Gopkg.lock
deleted file mode 100644
index 3be12ac8f..000000000
--- a/vendor/go.opencensus.io/Gopkg.lock
+++ /dev/null
@@ -1,231 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
- branch = "master"
- digest = "1:eee9386329f4fcdf8d6c0def0c9771b634bdd5ba460d888aa98c17d59b37a76c"
- name = "git.apache.org/thrift.git"
- packages = ["lib/go/thrift"]
- pruneopts = "UT"
- revision = "6e67faa92827ece022380b211c2caaadd6145bf5"
- source = "github.com/apache/thrift"
-
-[[projects]]
- branch = "master"
- digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
- name = "github.com/beorn7/perks"
- packages = ["quantile"]
- pruneopts = "UT"
- revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
-
-[[projects]]
- digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf"
- name = "github.com/golang/protobuf"
- packages = [
- "proto",
- "ptypes",
- "ptypes/any",
- "ptypes/duration",
- "ptypes/timestamp",
- ]
- pruneopts = "UT"
- revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
- version = "v1.2.0"
-
-[[projects]]
- digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
- name = "github.com/matttproud/golang_protobuf_extensions"
- packages = ["pbutil"]
- pruneopts = "UT"
- revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
- version = "v1.0.1"
-
-[[projects]]
- digest = "1:824c8f3aa4c5f23928fa84ebbd5ed2e9443b3f0cb958a40c1f2fbed5cf5e64b1"
- name = "github.com/openzipkin/zipkin-go"
- packages = [
- ".",
- "idgenerator",
- "model",
- "propagation",
- "reporter",
- "reporter/http",
- ]
- pruneopts = "UT"
- revision = "d455a5674050831c1e187644faa4046d653433c2"
- version = "v0.1.1"
-
-[[projects]]
- digest = "1:d14a5f4bfecf017cb780bdde1b6483e5deb87e12c332544d2c430eda58734bcb"
- name = "github.com/prometheus/client_golang"
- packages = [
- "prometheus",
- "prometheus/promhttp",
- ]
- pruneopts = "UT"
- revision = "c5b7fccd204277076155f10851dad72b76a49317"
- version = "v0.8.0"
-
-[[projects]]
- branch = "master"
- digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
- name = "github.com/prometheus/client_model"
- packages = ["go"]
- pruneopts = "UT"
- revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
-
-[[projects]]
- branch = "master"
- digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5"
- name = "github.com/prometheus/common"
- packages = [
- "expfmt",
- "internal/bitbucket.org/ww/goautoneg",
- "model",
- ]
- pruneopts = "UT"
- revision = "c7de2306084e37d54b8be01f3541a8464345e9a5"
-
-[[projects]]
- branch = "master"
- digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290"
- name = "github.com/prometheus/procfs"
- packages = [
- ".",
- "internal/util",
- "nfs",
- "xfs",
- ]
- pruneopts = "UT"
- revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92"
-
-[[projects]]
- branch = "master"
- digest = "1:deafe4ab271911fec7de5b693d7faae3f38796d9eb8622e2b9e7df42bb3dfea9"
- name = "golang.org/x/net"
- packages = [
- "context",
- "http/httpguts",
- "http2",
- "http2/hpack",
- "idna",
- "internal/timeseries",
- "trace",
- ]
- pruneopts = "UT"
- revision = "922f4815f713f213882e8ef45e0d315b164d705c"
-
-[[projects]]
- branch = "master"
- digest = "1:e0140c0c868c6e0f01c0380865194592c011fe521d6e12d78bfd33e756fe018a"
- name = "golang.org/x/sync"
- packages = ["semaphore"]
- pruneopts = "UT"
- revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
-
-[[projects]]
- branch = "master"
- digest = "1:a3f00ac457c955fe86a41e1495e8f4c54cb5399d609374c5cc26aa7d72e542c8"
- name = "golang.org/x/sys"
- packages = ["unix"]
- pruneopts = "UT"
- revision = "3b58ed4ad3395d483fc92d5d14123ce2c3581fec"
-
-[[projects]]
- digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
- name = "golang.org/x/text"
- packages = [
- "collate",
- "collate/build",
- "internal/colltab",
- "internal/gen",
- "internal/tag",
- "internal/triegen",
- "internal/ucd",
- "language",
- "secure/bidirule",
- "transform",
- "unicode/bidi",
- "unicode/cldr",
- "unicode/norm",
- "unicode/rangetable",
- ]
- pruneopts = "UT"
- revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
- version = "v0.3.0"
-
-[[projects]]
- branch = "master"
- digest = "1:c0c17c94fe8bc1ab34e7f586a4a8b788c5e1f4f9f750ff23395b8b2f5a523530"
- name = "google.golang.org/api"
- packages = ["support/bundler"]
- pruneopts = "UT"
- revision = "e21acd801f91da814261b938941d193bb036441a"
-
-[[projects]]
- branch = "master"
- digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c"
- name = "google.golang.org/genproto"
- packages = ["googleapis/rpc/status"]
- pruneopts = "UT"
- revision = "c66870c02cf823ceb633bcd05be3c7cda29976f4"
-
-[[projects]]
- digest = "1:3dd7996ce6bf52dec6a2f69fa43e7c4cefea1d4dfa3c8ab7a5f8a9f7434e239d"
- name = "google.golang.org/grpc"
- packages = [
- ".",
- "balancer",
- "balancer/base",
- "balancer/roundrobin",
- "codes",
- "connectivity",
- "credentials",
- "encoding",
- "encoding/proto",
- "grpclog",
- "internal",
- "internal/backoff",
- "internal/channelz",
- "internal/envconfig",
- "internal/grpcrand",
- "internal/transport",
- "keepalive",
- "metadata",
- "naming",
- "peer",
- "resolver",
- "resolver/dns",
- "resolver/passthrough",
- "stats",
- "status",
- "tap",
- ]
- pruneopts = "UT"
- revision = "32fb0ac620c32ba40a4626ddf94d90d12cce3455"
- version = "v1.14.0"
-
-[solve-meta]
- analyzer-name = "dep"
- analyzer-version = 1
- input-imports = [
- "git.apache.org/thrift.git/lib/go/thrift",
- "github.com/golang/protobuf/proto",
- "github.com/openzipkin/zipkin-go",
- "github.com/openzipkin/zipkin-go/model",
- "github.com/openzipkin/zipkin-go/reporter",
- "github.com/openzipkin/zipkin-go/reporter/http",
- "github.com/prometheus/client_golang/prometheus",
- "github.com/prometheus/client_golang/prometheus/promhttp",
- "golang.org/x/net/context",
- "golang.org/x/net/http2",
- "google.golang.org/api/support/bundler",
- "google.golang.org/grpc",
- "google.golang.org/grpc/codes",
- "google.golang.org/grpc/grpclog",
- "google.golang.org/grpc/metadata",
- "google.golang.org/grpc/stats",
- "google.golang.org/grpc/status",
- ]
- solver-name = "gps-cdcl"
- solver-version = 1
diff --git a/vendor/go.opencensus.io/Gopkg.toml b/vendor/go.opencensus.io/Gopkg.toml
deleted file mode 100644
index a9f3cd68e..000000000
--- a/vendor/go.opencensus.io/Gopkg.toml
+++ /dev/null
@@ -1,36 +0,0 @@
-# For v0.x.y dependencies, prefer adding a constraints of the form: version=">= 0.x.y"
-# to avoid locking to a particular minor version which can cause dep to not be
-# able to find a satisfying dependency graph.
-
-[[constraint]]
- branch = "master"
- name = "git.apache.org/thrift.git"
- source = "github.com/apache/thrift"
-
-[[constraint]]
- name = "github.com/golang/protobuf"
- version = "1.0.0"
-
-[[constraint]]
- name = "github.com/openzipkin/zipkin-go"
- version = ">=0.1.0"
-
-[[constraint]]
- name = "github.com/prometheus/client_golang"
- version = ">=0.8.0"
-
-[[constraint]]
- branch = "master"
- name = "golang.org/x/net"
-
-[[constraint]]
- branch = "master"
- name = "google.golang.org/api"
-
-[[constraint]]
- name = "google.golang.org/grpc"
- version = "1.11.3"
-
-[prune]
- go-tests = true
- unused-packages = true
diff --git a/vendor/go.opencensus.io/LICENSE b/vendor/go.opencensus.io/LICENSE
deleted file mode 100644
index 7a4a3ea24..000000000
--- a/vendor/go.opencensus.io/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
\ No newline at end of file
diff --git a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md
deleted file mode 100644
index e3a338e11..000000000
--- a/vendor/go.opencensus.io/README.md
+++ /dev/null
@@ -1,262 +0,0 @@
-# OpenCensus Libraries for Go
-
-[![Build Status][travis-image]][travis-url]
-[![Windows Build Status][appveyor-image]][appveyor-url]
-[![GoDoc][godoc-image]][godoc-url]
-[![Gitter chat][gitter-image]][gitter-url]
-
-OpenCensus Go is a Go implementation of OpenCensus, a toolkit for
-collecting application performance and behavior monitoring data.
-Currently it consists of three major components: tags, stats, and tracing.
-
-## Installation
-
-```
-$ go get -u go.opencensus.io
-```
-
-The API of this project is still evolving; see the [Deprecation Policy](#deprecation-policy).
-The use of vendoring or a dependency management tool is recommended.
-
-## Prerequisites
-
-OpenCensus Go libraries require Go 1.8 or later.
-
-## Getting Started
-
-The easiest way to get started using OpenCensus in your application is to use an existing
-integration with your RPC framework:
-
-* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp)
-* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc)
-* [database/sql](https://godoc.org/github.com/basvanbeek/ocsql)
-* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus)
-* [Groupcache](https://godoc.org/github.com/orijtech/groupcache)
-* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy)
-* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver)
-* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo)
-* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis)
-* [Memcache](https://godoc.org/github.com/orijtech/gomemcache)
-
-If you're using a framework not listed here, you can either implement your own middleware for your
-framework or use [custom stats](#stats) and [spans](#spans) directly in your application.
-
-## Exporters
-
-OpenCensus can export instrumentation data to various backends.
-OpenCensus has exporter implementations for the following; users
-can also implement their own exporters by implementing the exporter interfaces
-([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter),
-[trace](https://godoc.org/go.opencensus.io/trace#Exporter)):
-
-* [Prometheus][exporter-prom] for stats
-* [OpenZipkin][exporter-zipkin] for traces
-* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces
-* [Jaeger][exporter-jaeger] for traces
-* [AWS X-Ray][exporter-xray] for traces
-* [Datadog][exporter-datadog] for stats and traces
-* [Graphite][exporter-graphite] for stats
-
-## Overview
-
-![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg)
-
-In a microservices environment, a user request may go through
-multiple services until there is a response. OpenCensus allows
-you to instrument your services and collect diagnostic data
-end-to-end across all of them.
-
-## Tags
-
-Tags represent propagated key-value pairs. They are propagated using `context.Context`
-in the same process or can be encoded to be transmitted on the wire. Usually, this will
-be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler`
-for gRPC.
-
-Package tag allows adding or modifying tags in the current context.
-
-[embedmd]:# (internal/readme/tags.go new)
-```go
-ctx, err = tag.New(ctx,
- tag.Insert(osKey, "macOS-10.12.5"),
- tag.Upsert(userIDKey, "cde36753ed"),
-)
-if err != nil {
- log.Fatal(err)
-}
-```
-
-## Stats
-
-OpenCensus is a low-overhead framework even when instrumentation is always enabled.
-To achieve this, it is optimized to make the recording of data points fast
-and separate from the data aggregation.
-
-OpenCensus stats collection happens in two stages:
-
-* Definition of measures and recording of data points
-* Definition of views and aggregation of the recorded data
-
-### Recording
-
-Measurements are data points associated with a measure.
-Recording implicitly tags the set of Measurements with the tags from the
-provided context:
-
-[embedmd]:# (internal/readme/stats.go record)
-```go
-stats.Record(ctx, videoSize.M(102478))
-```
-
-### Views
-
-Views are how Measures are aggregated. You can think of them as queries over the
-set of recorded data points (measurements).
-
-Views have two parts: the tags to group by and the aggregation type used.
-
-Currently three types of aggregations are supported:
-* CountAggregation is used to count the number of times a sample was recorded.
-* DistributionAggregation is used to provide a histogram of the values of the samples.
-* SumAggregation is used to sum up all sample values.
-
-[embedmd]:# (internal/readme/stats.go aggs)
-```go
-distAgg := view.Distribution(0, 1<<32, 2<<32, 3<<32)
-countAgg := view.Count()
-sumAgg := view.Sum()
-```
-
-Here we create a view with the DistributionAggregation over our measure.
-
-[embedmd]:# (internal/readme/stats.go view)
-```go
-if err := view.Register(&view.View{
- Name: "example.com/video_size_distribution",
- Description: "distribution of processed video size over time",
- Measure: videoSize,
- Aggregation: view.Distribution(0, 1<<32, 2<<32, 3<<32),
-}); err != nil {
- log.Fatalf("Failed to register view: %v", err)
-}
-```
-
-Register begins collecting data for the view. Registered views' data will be
-exported via the registered exporters.
-
-## Traces
-
-A distributed trace tracks the progression of a single user request as
-it is handled by the services and processes that make up an application.
-Each step is called a span in the trace. Spans include metadata about the step,
-including especially the time spent in the step, called the span’s latency.
-
-Below you see a trace and several spans underneath it.
-
-![Traces and spans](https://i.imgur.com/7hZwRVj.png)
-
-### Spans
-
-A span is the unit step in a trace. Each span has a name, latency, status, and
-additional metadata.
-
-Below we are starting a span for a cache read and ending it
-when we are done:
-
-[embedmd]:# (internal/readme/trace.go startend)
-```go
-ctx, span := trace.StartSpan(ctx, "cache.Get")
-defer span.End()
-
-// Do work to get from cache.
-```
-
-### Propagation
-
-Spans can have parents or can be root spans if they don't have any parents.
-The current span is propagated in-process and across the network to allow associating
-new child spans with the parent.
-
-In the same process, context.Context is used to propagate spans.
-trace.StartSpan creates a new root span if the current context
-doesn't contain a span; otherwise, it creates a child of the span that is
-already in the current context. The returned context can be used to keep
-propagating the newly created span in the current context.
-
-[embedmd]:# (internal/readme/trace.go startend)
-```go
-ctx, span := trace.StartSpan(ctx, "cache.Get")
-defer span.End()
-
-// Do work to get from cache.
-```
-
-Across the network, OpenCensus provides different propagation
-methods for different protocols.
-
-* gRPC integrations use OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation).
-* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation)
- by default but can be configured to use a custom propagation method by setting another
- [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat).
-
-## Execution Tracer
-
-With Go 1.11, OpenCensus Go will support integration with the Go execution tracer.
-See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68)
-for an example of their mutual use.
-
-## Profiles
-
-OpenCensus tags can be applied as profiler labels
-for users who are on Go 1.9 and above.
-
-[embedmd]:# (internal/readme/tags.go profiler)
-```go
-ctx, err = tag.New(ctx,
- tag.Insert(osKey, "macOS-10.12.5"),
- tag.Insert(userIDKey, "fff0989878"),
-)
-if err != nil {
- log.Fatal(err)
-}
-tag.Do(ctx, func(ctx context.Context) {
- // Do work.
- // When profiling is on, samples will be
- // recorded with the key/values from the tag map.
-})
-```
-
-A screenshot of the CPU profile from the program above:
-
-![CPU profile](https://i.imgur.com/jBKjlkw.png)
-
-## Deprecation Policy
-
-Before version 1.0.0, the following deprecation policy will be observed:
-
-No backwards-incompatible changes will be made except for the removal of symbols that have
-been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release
-removing the *Deprecated* functionality will be made no sooner than 28 days after the first
-release in which the functionality was marked *Deprecated*.
-
-[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master
-[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go
-[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true
-[appveyor-url]: https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/branch/master
-[godoc-image]: https://godoc.org/go.opencensus.io?status.svg
-[godoc-url]: https://godoc.org/go.opencensus.io
-[gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg
-[gitter-url]: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
-
-
-[new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap
-[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace
-
-[exporter-prom]: https://godoc.org/go.opencensus.io/exporter/prometheus
-[exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver
-[exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin
-[exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger
-[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws
-[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog
-[exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite
-
diff --git a/vendor/go.opencensus.io/appveyor.yml b/vendor/go.opencensus.io/appveyor.yml
deleted file mode 100644
index 5aa067183..000000000
--- a/vendor/go.opencensus.io/appveyor.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-version: "{build}"
-
-platform: x64
-
-clone_folder: c:\gopath\src\go.opencensus.io
-
-environment:
- GOPATH: c:\gopath
- GOVERSION: 1.8
-
-install:
- - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- - go version
- - go env
-
-build: false
-deploy: false
-
-test_script:
- - cd %APPVEYOR_BUILD_FOLDER%
- - gofmt -w .
- - go get -v -t .\...
- - go test -race -v .\...
- - go vet .\...
diff --git a/vendor/go.opencensus.io/go.mod b/vendor/go.opencensus.io/go.mod
deleted file mode 100644
index ae7cbc15c..000000000
--- a/vendor/go.opencensus.io/go.mod
+++ /dev/null
@@ -1,20 +0,0 @@
-module go.opencensus.io
-
-require (
- git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999
- github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973
- github.com/golang/protobuf v1.2.0
- github.com/matttproud/golang_protobuf_extensions v1.0.1
- github.com/openzipkin/zipkin-go v0.1.1
- github.com/prometheus/client_golang v0.8.0
- github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
- github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e
- github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273
- golang.org/x/net v0.0.0-20180906233101-161cd47e91fd
- golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f
- golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e
- golang.org/x/text v0.3.0
- google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf
- google.golang.org/genproto v0.0.0-20180831171423-11092d34479b
- google.golang.org/grpc v1.14.0
-)
diff --git a/vendor/go.opencensus.io/go.sum b/vendor/go.opencensus.io/go.sum
deleted file mode 100644
index 91d0af1e4..000000000
--- a/vendor/go.opencensus.io/go.sum
+++ /dev/null
@@ -1,21 +0,0 @@
-git.apache.org/thrift.git v0.0.0-20180807212849-6e67faa92827/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
-git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
-github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-golang.org/x/net v0.0.0-20180821023952-922f4815f713/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180821140842-3b58ed4ad339/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-google.golang.org/api v0.0.0-20180818000503-e21acd801f91/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
-google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go
deleted file mode 100644
index e1d1238d0..000000000
--- a/vendor/go.opencensus.io/internal/internal.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal // import "go.opencensus.io/internal"
-
-import (
- "fmt"
- "time"
-
- "go.opencensus.io"
-)
-
-// UserAgent is the user agent to be added to the outgoing
-// requests from the exporters.
-var UserAgent = fmt.Sprintf("opencensus-go [%s]", opencensus.Version())
-
-// MonotonicEndTime returns the end time at present
-// but offset from start, monotonically.
-//
-// The monotonic clock is used in subtractions hence
-// the duration since start added back to start gives
-// end as a monotonic time.
-// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks
-func MonotonicEndTime(start time.Time) time.Time {
- return start.Add(time.Now().Sub(start))
-}
diff --git a/vendor/go.opencensus.io/internal/sanitize.go b/vendor/go.opencensus.io/internal/sanitize.go
deleted file mode 100644
index de8ccf236..000000000
--- a/vendor/go.opencensus.io/internal/sanitize.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "strings"
- "unicode"
-)
-
-const labelKeySizeLimit = 100
-
-// Sanitize returns a string that is truncated to 100 characters if it's too
-// long, and replaces non-alphanumeric characters with underscores.
-func Sanitize(s string) string {
- if len(s) == 0 {
- return s
- }
- if len(s) > labelKeySizeLimit {
- s = s[:labelKeySizeLimit]
- }
- s = strings.Map(sanitizeRune, s)
- if unicode.IsDigit(rune(s[0])) {
- s = "key_" + s
- }
- if s[0] == '_' {
- s = "key" + s
- }
- return s
-}
-
-// converts anything that is not a letter or digit to an underscore
-func sanitizeRune(r rune) rune {
- if unicode.IsLetter(r) || unicode.IsDigit(r) {
- return r
- }
- // Everything else turns into an underscore
- return '_'
-}
diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go
deleted file mode 100644
index 3b1af8b4b..000000000
--- a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-// Package tagencoding contains the tag encoding
-// used internally by the stats collector.
-package tagencoding // import "go.opencensus.io/internal/tagencoding"
-
-type Values struct {
- Buffer []byte
- WriteIndex int
- ReadIndex int
-}
-
-func (vb *Values) growIfRequired(expected int) {
- if len(vb.Buffer)-vb.WriteIndex < expected {
- tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected)
- copy(tmp, vb.Buffer)
- vb.Buffer = tmp
- }
-}
-
-func (vb *Values) WriteValue(v []byte) {
- length := len(v) & 0xff
- vb.growIfRequired(1 + length)
-
- // writing length of v
- vb.Buffer[vb.WriteIndex] = byte(length)
- vb.WriteIndex++
-
- if length == 0 {
- // No value was encoded for this key
- return
- }
-
- // writing v
- copy(vb.Buffer[vb.WriteIndex:], v[:length])
- vb.WriteIndex += length
-}
-
-// ReadValue is the helper method to read the values when decoding valuesBytes to a map[Key][]byte.
-func (vb *Values) ReadValue() []byte {
- // read length of v
- length := int(vb.Buffer[vb.ReadIndex])
- vb.ReadIndex++
- if length == 0 {
- // No value was encoded for this key
- return nil
- }
-
- // read value of v
- v := make([]byte, length)
- endIdx := vb.ReadIndex + length
- copy(v, vb.Buffer[vb.ReadIndex:endIdx])
- vb.ReadIndex = endIdx
- return v
-}
-
-func (vb *Values) Bytes() []byte {
- return vb.Buffer[:vb.WriteIndex]
-}
diff --git a/vendor/go.opencensus.io/internal/traceinternals.go b/vendor/go.opencensus.io/internal/traceinternals.go
deleted file mode 100644
index 553ca68dc..000000000
--- a/vendor/go.opencensus.io/internal/traceinternals.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "time"
-)
-
-// Trace allows internal access to some trace functionality.
-// TODO(#412): remove this
-var Trace interface{}
-
-var LocalSpanStoreEnabled bool
-
-// BucketConfiguration stores the number of samples to store for span buckets
-// for successful and failed spans for a particular span name.
-type BucketConfiguration struct {
- Name string
- MaxRequestsSucceeded int
- MaxRequestsErrors int
-}
-
-// PerMethodSummary is a summary of the spans stored for a single span name.
-type PerMethodSummary struct {
- Active int
- LatencyBuckets []LatencyBucketSummary
- ErrorBuckets []ErrorBucketSummary
-}
-
-// LatencyBucketSummary is a summary of a latency bucket.
-type LatencyBucketSummary struct {
- MinLatency, MaxLatency time.Duration
- Size int
-}
-
-// ErrorBucketSummary is a summary of an error bucket.
-type ErrorBucketSummary struct {
- ErrorCode int32
- Size int
-}
diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go
deleted file mode 100644
index 985297ee1..000000000
--- a/vendor/go.opencensus.io/opencensus.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package opencensus contains Go support for OpenCensus.
-package opencensus // import "go.opencensus.io"
-
-// Version is the current release version of OpenCensus in use.
-func Version() string {
- return "0.17.0"
-}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go
deleted file mode 100644
index 68faf24f5..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/client.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "net/http"
- "net/http/httptrace"
-
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
-)
-
-// Transport is an http.RoundTripper that instruments all outgoing requests with
-// OpenCensus stats and tracing.
-//
-// The zero value is intended to be a useful default, but for
-// now it's recommended that you explicitly set Propagation, since the default
-// for this may change.
-type Transport struct {
- // Base may be set to wrap another http.RoundTripper that does the actual
- // requests. By default http.DefaultTransport is used.
- //
- // If base HTTP roundtripper implements CancelRequest,
- // the returned round tripper will be cancelable.
- Base http.RoundTripper
-
- // Propagation defines how traces are propagated. If unspecified, a default
- // (currently B3 format) will be used.
- Propagation propagation.HTTPFormat
-
- // StartOptions are applied to the span started by this Transport around each
- // request.
- //
- // StartOptions.SpanKind will always be set to trace.SpanKindClient
- // for spans started by this transport.
- StartOptions trace.StartOptions
-
-	// FormatSpanName holds the function to use for generating the span name
- // from the information found in the outgoing HTTP Request. By default the
- // name equals the URL Path.
- FormatSpanName func(*http.Request) string
-
- // NewClientTrace may be set to a function allowing the current *trace.Span
- // to be annotated with HTTP request event information emitted by the
- // httptrace package.
- NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace
-
- // TODO: Implement tag propagation for HTTP.
-}
-
-// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request.
-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
- rt := t.base()
- if isHealthEndpoint(req.URL.Path) {
- return rt.RoundTrip(req)
- }
- // TODO: remove excessive nesting of http.RoundTrippers here.
- format := t.Propagation
- if format == nil {
- format = defaultFormat
- }
- spanNameFormatter := t.FormatSpanName
- if spanNameFormatter == nil {
- spanNameFormatter = spanNameFromURL
- }
- rt = &traceTransport{
- base: rt,
- format: format,
- startOptions: trace.StartOptions{
- Sampler: t.StartOptions.Sampler,
- SpanKind: trace.SpanKindClient,
- },
- formatSpanName: spanNameFormatter,
- newClientTrace: t.NewClientTrace,
- }
- rt = statsTransport{base: rt}
- return rt.RoundTrip(req)
-}
-
-func (t *Transport) base() http.RoundTripper {
- if t.Base != nil {
- return t.Base
- }
- return http.DefaultTransport
-}
-
-// CancelRequest cancels an in-flight request by closing its connection.
-func (t *Transport) CancelRequest(req *http.Request) {
- type canceler interface {
- CancelRequest(*http.Request)
- }
- if cr, ok := t.base().(canceler); ok {
- cr.CancelRequest(req)
- }
-}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go
deleted file mode 100644
index 9b286b929..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "context"
- "io"
- "net/http"
- "strconv"
- "sync"
- "time"
-
- "go.opencensus.io/stats"
- "go.opencensus.io/tag"
-)
-
-// statsTransport is an http.RoundTripper that collects stats for the outgoing requests.
-type statsTransport struct {
- base http.RoundTripper
-}
-
-// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request.
-func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- ctx, _ := tag.New(req.Context(),
- tag.Upsert(Host, req.URL.Host),
- tag.Upsert(Path, req.URL.Path),
- tag.Upsert(Method, req.Method))
- req = req.WithContext(ctx)
- track := &tracker{
- start: time.Now(),
- ctx: ctx,
- }
- if req.Body == nil {
- // TODO: Handle cases where ContentLength is not set.
- track.reqSize = -1
- } else if req.ContentLength > 0 {
- track.reqSize = req.ContentLength
- }
- stats.Record(ctx, ClientRequestCount.M(1))
-
- // Perform request.
- resp, err := t.base.RoundTrip(req)
-
- if err != nil {
- track.statusCode = http.StatusInternalServerError
- track.end()
- } else {
- track.statusCode = resp.StatusCode
- if resp.Body == nil {
- track.end()
- } else {
- track.body = resp.Body
- resp.Body = track
- }
- }
- return resp, err
-}
-
-// CancelRequest cancels an in-flight request by closing its connection.
-func (t statsTransport) CancelRequest(req *http.Request) {
- type canceler interface {
- CancelRequest(*http.Request)
- }
- if cr, ok := t.base.(canceler); ok {
- cr.CancelRequest(req)
- }
-}
-
-type tracker struct {
- ctx context.Context
- respSize int64
- reqSize int64
- start time.Time
- body io.ReadCloser
- statusCode int
- endOnce sync.Once
-}
-
-var _ io.ReadCloser = (*tracker)(nil)
-
-func (t *tracker) end() {
- t.endOnce.Do(func() {
- m := []stats.Measurement{
- ClientLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)),
- ClientResponseBytes.M(t.respSize),
- }
- if t.reqSize >= 0 {
- m = append(m, ClientRequestBytes.M(t.reqSize))
- }
- ctx, _ := tag.New(t.ctx, tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)))
- stats.Record(ctx, m...)
- })
-}
-
-func (t *tracker) Read(b []byte) (int, error) {
- n, err := t.body.Read(b)
- switch err {
- case nil:
- t.respSize += int64(n)
- return n, nil
- case io.EOF:
- t.end()
- }
- return n, err
-}
-
-func (t *tracker) Close() error {
-	// Invoking end on Close helps catch cases in which the body
-	// is closed before a read returns io.EOF, so end was never
-	// called from Read.
- t.end()
- return t.body.Close()
-}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/doc.go b/vendor/go.opencensus.io/plugin/ochttp/doc.go
deleted file mode 100644
index 10e626b16..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package ochttp provides OpenCensus instrumentation for the net/http package.
-//
-// For server instrumentation, see Handler. For client-side instrumentation,
-// see Transport.
-package ochttp // import "go.opencensus.io/plugin/ochttp"
diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go
deleted file mode 100644
index f777772ec..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package b3 contains a propagation.HTTPFormat implementation
-// for B3 propagation. See https://github.com/openzipkin/b3-propagation
-// for more details.
-package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3"
-
-import (
- "encoding/hex"
- "net/http"
-
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
-)
-
-// B3 headers that OpenCensus understands.
-const (
- TraceIDHeader = "X-B3-TraceId"
- SpanIDHeader = "X-B3-SpanId"
- SampledHeader = "X-B3-Sampled"
-)
-
-// HTTPFormat implements propagation.HTTPFormat to propagate
-// traces in HTTP headers in B3 propagation format.
-// HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers
-// because there are additional fields not represented in the
-// OpenCensus span context. Spans created from the incoming
-// header will be the direct children of the client-side span.
-// Similarly, the receiver of the outgoing spans should use the client-side
-// span created by OpenCensus as the parent.
-type HTTPFormat struct{}
-
-var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
-
-// SpanContextFromRequest extracts a B3 span context from incoming requests.
-func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
- tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader))
- if !ok {
- return trace.SpanContext{}, false
- }
- sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader))
- if !ok {
- return trace.SpanContext{}, false
- }
- sampled, _ := ParseSampled(req.Header.Get(SampledHeader))
- return trace.SpanContext{
- TraceID: tid,
- SpanID: sid,
- TraceOptions: sampled,
- }, true
-}
-
-// ParseTraceID parses the value of the X-B3-TraceId header.
-func ParseTraceID(tid string) (trace.TraceID, bool) {
- if tid == "" {
- return trace.TraceID{}, false
- }
- b, err := hex.DecodeString(tid)
- if err != nil {
- return trace.TraceID{}, false
- }
- var traceID trace.TraceID
- if len(b) <= 8 {
- // The lower 64-bits.
- start := 8 + (8 - len(b))
- copy(traceID[start:], b)
- } else {
- start := 16 - len(b)
- copy(traceID[start:], b)
- }
-
- return traceID, true
-}
-
-// ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers.
-func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) {
- if sid == "" {
- return trace.SpanID{}, false
- }
- b, err := hex.DecodeString(sid)
- if err != nil {
- return trace.SpanID{}, false
- }
- start := 8 - len(b)
- copy(spanID[start:], b)
- return spanID, true
-}
-
-// ParseSampled parses the value of the X-B3-Sampled header.
-func ParseSampled(sampled string) (trace.TraceOptions, bool) {
- switch sampled {
- case "true", "1":
- return trace.TraceOptions(1), true
- default:
- return trace.TraceOptions(0), false
- }
-}
-
-// SpanContextToRequest modifies the given request to include B3 headers.
-func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
- req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:]))
- req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:]))
-
- var sampled string
- if sc.IsSampled() {
- sampled = "1"
- } else {
- sampled = "0"
- }
- req.Header.Set(SampledHeader, sampled)
-}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/route.go b/vendor/go.opencensus.io/plugin/ochttp/route.go
deleted file mode 100644
index dbe22d586..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/route.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "net/http"
-
- "go.opencensus.io/tag"
-)
-
-// WithRouteTag returns an http.Handler that records stats with the
-// http_server_route tag set to the given value.
-func WithRouteTag(handler http.Handler, route string) http.Handler {
- return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator {
- addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)}
- ctx, _ := tag.New(r.Context(), addRoute...)
- r = r.WithContext(ctx)
- handler.ServeHTTP(w, r)
- return addRoute
- })
-}
-
-// taggedHandlerFunc is a http.Handler that returns tags describing the
-// processing of the request. These tags will be recorded along with the
-// measures in this package at the end of the request.
-type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator
-
-func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- tags := h(w, r)
- if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok {
- a.t = append(a.t, tags...)
- }
-}
-
-type addedTagsKey struct{}
-
-type addedTags struct {
- t []tag.Mutator
-}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go
deleted file mode 100644
index ea2e3e288..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/server.go
+++ /dev/null
@@ -1,430 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "context"
- "io"
- "net/http"
- "strconv"
- "sync"
- "time"
-
- "go.opencensus.io/stats"
- "go.opencensus.io/tag"
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
-)
-
-// Handler is an http.Handler wrapper to instrument your HTTP server with
-// OpenCensus. It supports both stats and tracing.
-//
-// Tracing
-//
-// This handler is aware of the incoming request's span, reading it from request
-// headers as configured using the Propagation field.
-// The extracted span can be accessed from the incoming request's
-// context.
-//
-// span := trace.FromContext(r.Context())
-//
-// The server span will be automatically ended at the end of ServeHTTP.
-type Handler struct {
- // Propagation defines how traces are propagated. If unspecified,
- // B3 propagation will be used.
- Propagation propagation.HTTPFormat
-
- // Handler is the handler used to handle the incoming request.
- Handler http.Handler
-
- // StartOptions are applied to the span started by this Handler around each
- // request.
- //
- // StartOptions.SpanKind will always be set to trace.SpanKindServer
- // for spans started by this transport.
- StartOptions trace.StartOptions
-
- // IsPublicEndpoint should be set to true for publicly accessible HTTP(S)
- // servers. If true, any trace metadata set on the incoming request will
- // be added as a linked trace instead of being added as a parent of the
- // current trace.
- IsPublicEndpoint bool
-
- // FormatSpanName holds the function to use for generating the span name
- // from the information found in the incoming HTTP Request. By default the
- // name equals the URL Path.
- FormatSpanName func(*http.Request) string
-}
-
-func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- var tags addedTags
- r, traceEnd := h.startTrace(w, r)
- defer traceEnd()
- w, statsEnd := h.startStats(w, r)
- defer statsEnd(&tags)
- handler := h.Handler
- if handler == nil {
- handler = http.DefaultServeMux
- }
- r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags))
- handler.ServeHTTP(w, r)
-}
-
-func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) {
- if isHealthEndpoint(r.URL.Path) {
- return r, func() {}
- }
- var name string
- if h.FormatSpanName == nil {
- name = spanNameFromURL(r)
- } else {
- name = h.FormatSpanName(r)
- }
- ctx := r.Context()
- var span *trace.Span
- sc, ok := h.extractSpanContext(r)
- if ok && !h.IsPublicEndpoint {
- ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc,
- trace.WithSampler(h.StartOptions.Sampler),
- trace.WithSpanKind(trace.SpanKindServer))
- } else {
- ctx, span = trace.StartSpan(ctx, name,
- trace.WithSampler(h.StartOptions.Sampler),
- trace.WithSpanKind(trace.SpanKindServer),
- )
- if ok {
- span.AddLink(trace.Link{
- TraceID: sc.TraceID,
- SpanID: sc.SpanID,
- Type: trace.LinkTypeChild,
- Attributes: nil,
- })
- }
- }
- span.AddAttributes(requestAttrs(r)...)
- return r.WithContext(ctx), span.End
-}
-
-func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) {
- if h.Propagation == nil {
- return defaultFormat.SpanContextFromRequest(r)
- }
- return h.Propagation.SpanContextFromRequest(r)
-}
-
-func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) {
- ctx, _ := tag.New(r.Context(),
- tag.Upsert(Host, r.URL.Host),
- tag.Upsert(Path, r.URL.Path),
- tag.Upsert(Method, r.Method))
- track := &trackingResponseWriter{
- start: time.Now(),
- ctx: ctx,
- writer: w,
- }
- if r.Body == nil {
- // TODO: Handle cases where ContentLength is not set.
- track.reqSize = -1
- } else if r.ContentLength > 0 {
- track.reqSize = r.ContentLength
- }
- stats.Record(ctx, ServerRequestCount.M(1))
- return track.wrappedResponseWriter(), track.end
-}
-
-type trackingResponseWriter struct {
- ctx context.Context
- reqSize int64
- respSize int64
- start time.Time
- statusCode int
- statusLine string
- endOnce sync.Once
- writer http.ResponseWriter
-}
-
-// Compile time assertion for ResponseWriter interface
-var _ http.ResponseWriter = (*trackingResponseWriter)(nil)
-
-var logTagsErrorOnce sync.Once
-
-func (t *trackingResponseWriter) end(tags *addedTags) {
- t.endOnce.Do(func() {
- if t.statusCode == 0 {
- t.statusCode = 200
- }
-
- span := trace.FromContext(t.ctx)
- span.SetStatus(TraceStatus(t.statusCode, t.statusLine))
-
- m := []stats.Measurement{
- ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)),
- ServerResponseBytes.M(t.respSize),
- }
- if t.reqSize >= 0 {
- m = append(m, ServerRequestBytes.M(t.reqSize))
- }
- allTags := make([]tag.Mutator, len(tags.t)+1)
- allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode))
- copy(allTags[1:], tags.t)
- ctx, _ := tag.New(t.ctx, allTags...)
- stats.Record(ctx, m...)
- })
-}
-
-func (t *trackingResponseWriter) Header() http.Header {
- return t.writer.Header()
-}
-
-func (t *trackingResponseWriter) Write(data []byte) (int, error) {
- n, err := t.writer.Write(data)
- t.respSize += int64(n)
- return n, err
-}
-
-func (t *trackingResponseWriter) WriteHeader(statusCode int) {
- t.writer.WriteHeader(statusCode)
- t.statusCode = statusCode
- t.statusLine = http.StatusText(t.statusCode)
-}
-
-// wrappedResponseWriter returns a wrapped version of the original
-// ResponseWriter and only implements the same combination of additional
-// interfaces as the original.
-// This implementation is based on https://github.com/felixge/httpsnoop.
-func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter {
- var (
- hj, i0 = t.writer.(http.Hijacker)
- cn, i1 = t.writer.(http.CloseNotifier)
- pu, i2 = t.writer.(http.Pusher)
- fl, i3 = t.writer.(http.Flusher)
- rf, i4 = t.writer.(io.ReaderFrom)
- )
-
- switch {
- case !i0 && !i1 && !i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- }{t}
- case !i0 && !i1 && !i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- io.ReaderFrom
- }{t, rf}
- case !i0 && !i1 && !i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Flusher
- }{t, fl}
- case !i0 && !i1 && !i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.Flusher
- io.ReaderFrom
- }{t, fl, rf}
- case !i0 && !i1 && i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Pusher
- }{t, pu}
- case !i0 && !i1 && i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.Pusher
- io.ReaderFrom
- }{t, pu, rf}
- case !i0 && !i1 && i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Pusher
- http.Flusher
- }{t, pu, fl}
- case !i0 && !i1 && i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.Pusher
- http.Flusher
- io.ReaderFrom
- }{t, pu, fl, rf}
- case !i0 && i1 && !i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- }{t, cn}
- case !i0 && i1 && !i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- io.ReaderFrom
- }{t, cn, rf}
- case !i0 && i1 && !i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- http.Flusher
- }{t, cn, fl}
- case !i0 && i1 && !i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- http.Flusher
- io.ReaderFrom
- }{t, cn, fl, rf}
- case !i0 && i1 && i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- http.Pusher
- }{t, cn, pu}
- case !i0 && i1 && i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- http.Pusher
- io.ReaderFrom
- }{t, cn, pu, rf}
- case !i0 && i1 && i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- http.Pusher
- http.Flusher
- }{t, cn, pu, fl}
- case !i0 && i1 && i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- http.Pusher
- http.Flusher
- io.ReaderFrom
- }{t, cn, pu, fl, rf}
- case i0 && !i1 && !i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- }{t, hj}
- case i0 && !i1 && !i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- io.ReaderFrom
- }{t, hj, rf}
- case i0 && !i1 && !i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.Flusher
- }{t, hj, fl}
- case i0 && !i1 && !i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.Flusher
- io.ReaderFrom
- }{t, hj, fl, rf}
- case i0 && !i1 && i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.Pusher
- }{t, hj, pu}
- case i0 && !i1 && i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.Pusher
- io.ReaderFrom
- }{t, hj, pu, rf}
- case i0 && !i1 && i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.Pusher
- http.Flusher
- }{t, hj, pu, fl}
- case i0 && !i1 && i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.Pusher
- http.Flusher
- io.ReaderFrom
- }{t, hj, pu, fl, rf}
- case i0 && i1 && !i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- }{t, hj, cn}
- case i0 && i1 && !i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- io.ReaderFrom
- }{t, hj, cn, rf}
- case i0 && i1 && !i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- http.Flusher
- }{t, hj, cn, fl}
- case i0 && i1 && !i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- http.Flusher
- io.ReaderFrom
- }{t, hj, cn, fl, rf}
- case i0 && i1 && i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- http.Pusher
- }{t, hj, cn, pu}
- case i0 && i1 && i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- http.Pusher
- io.ReaderFrom
- }{t, hj, cn, pu, rf}
- case i0 && i1 && i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- http.Pusher
- http.Flusher
- }{t, hj, cn, pu, fl}
- case i0 && i1 && i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- http.Pusher
- http.Flusher
- io.ReaderFrom
- }{t, hj, cn, pu, fl, rf}
- default:
- return struct {
- http.ResponseWriter
- }{t}
- }
-}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go
deleted file mode 100644
index 7aa03cd5d..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "crypto/tls"
- "net/http"
- "net/http/httptrace"
- "strings"
-
- "go.opencensus.io/trace"
-)
-
-type spanAnnotator struct {
- sp *trace.Span
-}
-
-// TODO: Remove NewSpanAnnotator at the next release.
-
-// Deprecated: Use NewSpanAnnotatingClientTrace instead
-func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace {
- return NewSpanAnnotatingClientTrace(r, s)
-}
-
-// NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates
-// all emitted httptrace events on the provided Span.
-func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace {
- sa := spanAnnotator{sp: s}
-
- return &httptrace.ClientTrace{
- GetConn: sa.getConn,
- GotConn: sa.gotConn,
- PutIdleConn: sa.putIdleConn,
- GotFirstResponseByte: sa.gotFirstResponseByte,
- Got100Continue: sa.got100Continue,
- DNSStart: sa.dnsStart,
- DNSDone: sa.dnsDone,
- ConnectStart: sa.connectStart,
- ConnectDone: sa.connectDone,
- TLSHandshakeStart: sa.tlsHandshakeStart,
- TLSHandshakeDone: sa.tlsHandshakeDone,
- WroteHeaders: sa.wroteHeaders,
- Wait100Continue: sa.wait100Continue,
- WroteRequest: sa.wroteRequest,
- }
-}
-
-func (s spanAnnotator) getConn(hostPort string) {
- attrs := []trace.Attribute{
- trace.StringAttribute("httptrace.get_connection.host_port", hostPort),
- }
- s.sp.Annotate(attrs, "GetConn")
-}
-
-func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) {
- attrs := []trace.Attribute{
- trace.BoolAttribute("httptrace.got_connection.reused", info.Reused),
- trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle),
- }
- if info.WasIdle {
- attrs = append(attrs,
- trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String()))
- }
- s.sp.Annotate(attrs, "GotConn")
-}
-
-// PutIdleConn implements a httptrace.ClientTrace hook
-func (s spanAnnotator) putIdleConn(err error) {
- var attrs []trace.Attribute
- if err != nil {
- attrs = append(attrs,
- trace.StringAttribute("httptrace.put_idle_connection.error", err.Error()))
- }
- s.sp.Annotate(attrs, "PutIdleConn")
-}
-
-func (s spanAnnotator) gotFirstResponseByte() {
- s.sp.Annotate(nil, "GotFirstResponseByte")
-}
-
-func (s spanAnnotator) got100Continue() {
- s.sp.Annotate(nil, "Got100Continue")
-}
-
-func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) {
- attrs := []trace.Attribute{
- trace.StringAttribute("httptrace.dns_start.host", info.Host),
- }
- s.sp.Annotate(attrs, "DNSStart")
-}
-
-func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) {
- var addrs []string
- for _, addr := range info.Addrs {
- addrs = append(addrs, addr.String())
- }
- attrs := []trace.Attribute{
- trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")),
- }
- if info.Err != nil {
- attrs = append(attrs,
- trace.StringAttribute("httptrace.dns_done.error", info.Err.Error()))
- }
- s.sp.Annotate(attrs, "DNSDone")
-}
-
-func (s spanAnnotator) connectStart(network, addr string) {
- attrs := []trace.Attribute{
- trace.StringAttribute("httptrace.connect_start.network", network),
- trace.StringAttribute("httptrace.connect_start.addr", addr),
- }
- s.sp.Annotate(attrs, "ConnectStart")
-}
-
-func (s spanAnnotator) connectDone(network, addr string, err error) {
- attrs := []trace.Attribute{
- trace.StringAttribute("httptrace.connect_done.network", network),
- trace.StringAttribute("httptrace.connect_done.addr", addr),
- }
- if err != nil {
- attrs = append(attrs,
- trace.StringAttribute("httptrace.connect_done.error", err.Error()))
- }
- s.sp.Annotate(attrs, "ConnectDone")
-}
-
-func (s spanAnnotator) tlsHandshakeStart() {
- s.sp.Annotate(nil, "TLSHandshakeStart")
-}
-
-func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) {
- var attrs []trace.Attribute
- if err != nil {
- attrs = append(attrs,
- trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error()))
- }
- s.sp.Annotate(attrs, "TLSHandshakeDone")
-}
-
-func (s spanAnnotator) wroteHeaders() {
- s.sp.Annotate(nil, "WroteHeaders")
-}
-
-func (s spanAnnotator) wait100Continue() {
- s.sp.Annotate(nil, "Wait100Continue")
-}
-
-func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) {
- var attrs []trace.Attribute
- if info.Err != nil {
- attrs = append(attrs,
- trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error()))
- }
- s.sp.Annotate(attrs, "WroteRequest")
-}
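For reference, a minimal sketch of how the removed NewSpanAnnotatingClientTrace helper was typically attached to an outgoing request through net/http/httptrace. The span name and URL are illustrative, and a registered trace exporter plus a sampler would be needed to actually see the annotations:

package main

import (
	"context"
	"net/http"
	"net/http/httptrace"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/trace"
)

func main() {
	// Start a client span; with no exporter/sampler configured this is effectively a no-op.
	ctx, span := trace.StartSpan(context.Background(), "example/fetch")
	defer span.End()

	req, _ := http.NewRequest("GET", "https://example.com/", nil)

	// Annotate the span with low-level httptrace events (DNS, connect, TLS, ...).
	ct := ochttp.NewSpanAnnotatingClientTrace(req, span)
	req = req.WithContext(httptrace.WithClientTrace(ctx, ct))

	if resp, err := http.DefaultClient.Do(req); err == nil {
		resp.Body.Close()
	}
}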
diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go
deleted file mode 100644
index 21d651230..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/stats.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
-)
-
-// The following client HTTP measures are supported for use in custom views.
-var (
- ClientRequestCount = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitDimensionless)
- ClientRequestBytes = stats.Int64("opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes)
- ClientResponseBytes = stats.Int64("opencensus.io/http/client/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes)
- ClientLatency = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds)
-)
-
-// The following server HTTP measures are supported for use in custom views:
-var (
- ServerRequestCount = stats.Int64("opencensus.io/http/server/request_count", "Number of HTTP requests started", stats.UnitDimensionless)
- ServerRequestBytes = stats.Int64("opencensus.io/http/server/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes)
- ServerResponseBytes = stats.Int64("opencensus.io/http/server/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes)
- ServerLatency = stats.Float64("opencensus.io/http/server/latency", "End-to-end latency", stats.UnitMilliseconds)
-)
-
-// The following tags are applied to stats recorded by this package. Host, Path
-// and Method are applied to all measures. StatusCode is not applied to
-// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known.
-var (
- // Host is the value of the HTTP Host header.
- //
- // The value of this tag can be controlled by the HTTP client, so you need
- // to watch out for potentially generating high-cardinality labels in your
- // metrics backend if you use this tag in views.
- Host, _ = tag.NewKey("http.host")
-
- // StatusCode is the numeric HTTP response status code,
- // or "error" if a transport error occurred and no status code was read.
- StatusCode, _ = tag.NewKey("http.status")
-
- // Path is the URL path (not including query string) in the request.
- //
- // The value of this tag can be controlled by the HTTP client, so you need
- // to watch out for potentially generating high-cardinality labels in your
- // metrics backend if you use this tag in views.
- Path, _ = tag.NewKey("http.path")
-
- // Method is the HTTP method of the request, capitalized (GET, POST, etc.).
- Method, _ = tag.NewKey("http.method")
-
- // KeyServerRoute is a low cardinality string representing the logical
-	// handler of the request. This is usually the pattern registered on a
- // ServeMux (or similar string).
- KeyServerRoute, _ = tag.NewKey("http_server_route")
-)
-
-// Default distributions used by views in this package.
-var (
- DefaultSizeDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
- DefaultLatencyDistribution = view.Distribution(0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
-)
-
-// Package ochttp provides some convenience views.
-// You need to register the views for data to actually be collected.
-var (
- ClientRequestCountView = &view.View{
- Name: "opencensus.io/http/client/request_count",
- Description: "Count of HTTP requests started",
- Measure: ClientRequestCount,
- Aggregation: view.Count(),
- }
-
- ClientRequestBytesView = &view.View{
- Name: "opencensus.io/http/client/request_bytes",
- Description: "Size distribution of HTTP request body",
- Measure: ClientRequestBytes,
- Aggregation: DefaultSizeDistribution,
- }
-
- ClientResponseBytesView = &view.View{
- Name: "opencensus.io/http/client/response_bytes",
- Description: "Size distribution of HTTP response body",
- Measure: ClientResponseBytes,
- Aggregation: DefaultSizeDistribution,
- }
-
- ClientLatencyView = &view.View{
- Name: "opencensus.io/http/client/latency",
- Description: "Latency distribution of HTTP requests",
- Measure: ClientLatency,
- Aggregation: DefaultLatencyDistribution,
- }
-
- ClientRequestCountByMethod = &view.View{
- Name: "opencensus.io/http/client/request_count_by_method",
- Description: "Client request count by HTTP method",
- TagKeys: []tag.Key{Method},
- Measure: ClientRequestCount,
- Aggregation: view.Count(),
- }
-
- ClientResponseCountByStatusCode = &view.View{
- Name: "opencensus.io/http/client/response_count_by_status_code",
- Description: "Client response count by status code",
- TagKeys: []tag.Key{StatusCode},
- Measure: ClientLatency,
- Aggregation: view.Count(),
- }
-
- ServerRequestCountView = &view.View{
- Name: "opencensus.io/http/server/request_count",
- Description: "Count of HTTP requests started",
- Measure: ServerRequestCount,
- Aggregation: view.Count(),
- }
-
- ServerRequestBytesView = &view.View{
- Name: "opencensus.io/http/server/request_bytes",
- Description: "Size distribution of HTTP request body",
- Measure: ServerRequestBytes,
- Aggregation: DefaultSizeDistribution,
- }
-
- ServerResponseBytesView = &view.View{
- Name: "opencensus.io/http/server/response_bytes",
- Description: "Size distribution of HTTP response body",
- Measure: ServerResponseBytes,
- Aggregation: DefaultSizeDistribution,
- }
-
- ServerLatencyView = &view.View{
- Name: "opencensus.io/http/server/latency",
- Description: "Latency distribution of HTTP requests",
- Measure: ServerLatency,
- Aggregation: DefaultLatencyDistribution,
- }
-
- ServerRequestCountByMethod = &view.View{
- Name: "opencensus.io/http/server/request_count_by_method",
- Description: "Server request count by HTTP method",
- TagKeys: []tag.Key{Method},
- Measure: ServerRequestCount,
- Aggregation: view.Count(),
- }
-
- ServerResponseCountByStatusCode = &view.View{
- Name: "opencensus.io/http/server/response_count_by_status_code",
- Description: "Server response count by status code",
- TagKeys: []tag.Key{StatusCode},
- Measure: ServerLatency,
- Aggregation: view.Count(),
- }
-)
-
-// DefaultClientViews are the default client views provided by this package.
-var DefaultClientViews = []*view.View{
- ClientRequestCountView,
- ClientRequestBytesView,
- ClientResponseBytesView,
- ClientLatencyView,
- ClientRequestCountByMethod,
- ClientResponseCountByStatusCode,
-}
-
-// DefaultServerViews are the default server views provided by this package.
-var DefaultServerViews = []*view.View{
- ServerRequestCountView,
- ServerRequestBytesView,
- ServerResponseBytesView,
- ServerLatencyView,
- ServerRequestCountByMethod,
- ServerResponseCountByStatusCode,
-}
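As a usage note for the default view slices above, a minimal sketch that registers DefaultServerViews and then serves traffic through ochttp.Handler (defined elsewhere in this removed package); the port and handler are illustrative:

package main

import (
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
)

func main() {
	// Register the default server views so the measures recorded by ochttp
	// are aggregated and handed to any registered stats exporters.
	if err := view.Register(ochttp.DefaultServerViews...); err != nil {
		log.Fatalf("failed to register ochttp server views: %v", err)
	}

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// ochttp.Handler instruments every request with stats and traces.
	log.Fatal(http.ListenAndServe(":8080", &ochttp.Handler{Handler: mux}))
}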
diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go
deleted file mode 100644
index 819a2d5ff..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/trace.go
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "io"
- "net/http"
- "net/http/httptrace"
-
- "go.opencensus.io/plugin/ochttp/propagation/b3"
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
-)
-
-// TODO(jbd): Add godoc examples.
-
-var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{}
-
-// Attributes recorded on the span for the requests.
-// Only trace exporters will need them.
-const (
- HostAttribute = "http.host"
- MethodAttribute = "http.method"
- PathAttribute = "http.path"
- UserAgentAttribute = "http.user_agent"
- StatusCodeAttribute = "http.status_code"
-)
-
-type traceTransport struct {
- base http.RoundTripper
- startOptions trace.StartOptions
- format propagation.HTTPFormat
- formatSpanName func(*http.Request) string
- newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace
-}
-
-// TODO(jbd): Add message events for request and response size.
-
-// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers.
-// The created span can follow a parent span, if a parent is presented in
-// the request's context.
-func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- name := t.formatSpanName(req)
- // TODO(jbd): Discuss whether we want to prefix
- // outgoing requests with Sent.
- ctx, span := trace.StartSpan(req.Context(), name,
- trace.WithSampler(t.startOptions.Sampler),
- trace.WithSpanKind(trace.SpanKindClient))
-
- if t.newClientTrace != nil {
- req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span)))
- } else {
- req = req.WithContext(ctx)
- }
-
- if t.format != nil {
- // SpanContextToRequest will modify its Request argument, which is
- // contrary to the contract for http.RoundTripper, so we need to
- // pass it a copy of the Request.
- // However, the Request struct itself was already copied by
- // the WithContext calls above and so we just need to copy the header.
- header := make(http.Header)
- for k, v := range req.Header {
- header[k] = v
- }
- req.Header = header
- t.format.SpanContextToRequest(span.SpanContext(), req)
- }
-
- span.AddAttributes(requestAttrs(req)...)
- resp, err := t.base.RoundTrip(req)
- if err != nil {
- span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
- span.End()
- return resp, err
- }
-
- span.AddAttributes(responseAttrs(resp)...)
- span.SetStatus(TraceStatus(resp.StatusCode, resp.Status))
-
- // span.End() will be invoked after
- // a read from resp.Body returns io.EOF or when
- // resp.Body.Close() is invoked.
- resp.Body = &bodyTracker{rc: resp.Body, span: span}
- return resp, err
-}
-
-// bodyTracker wraps a response.Body and invokes
-// trace.EndSpan on encountering io.EOF on reading
-// the body of the original response.
-type bodyTracker struct {
- rc io.ReadCloser
- span *trace.Span
-}
-
-var _ io.ReadCloser = (*bodyTracker)(nil)
-
-func (bt *bodyTracker) Read(b []byte) (int, error) {
- n, err := bt.rc.Read(b)
-
- switch err {
- case nil:
- return n, nil
- case io.EOF:
- bt.span.End()
- default:
- // For all other errors, set the span status
- bt.span.SetStatus(trace.Status{
- // Code 2 is the error code for Internal server error.
- Code: 2,
- Message: err.Error(),
- })
- }
- return n, err
-}
-
-func (bt *bodyTracker) Close() error {
- // Invoking endSpan on Close will help catch the cases
- // in which a read returned a non-nil error, we set the
- // span status but didn't end the span.
- bt.span.End()
- return bt.rc.Close()
-}
-
-// CancelRequest cancels an in-flight request by closing its connection.
-func (t *traceTransport) CancelRequest(req *http.Request) {
- type canceler interface {
- CancelRequest(*http.Request)
- }
- if cr, ok := t.base.(canceler); ok {
- cr.CancelRequest(req)
- }
-}
-
-func spanNameFromURL(req *http.Request) string {
- return req.URL.Path
-}
-
-func requestAttrs(r *http.Request) []trace.Attribute {
- return []trace.Attribute{
- trace.StringAttribute(PathAttribute, r.URL.Path),
- trace.StringAttribute(HostAttribute, r.URL.Host),
- trace.StringAttribute(MethodAttribute, r.Method),
- trace.StringAttribute(UserAgentAttribute, r.UserAgent()),
- }
-}
-
-func responseAttrs(resp *http.Response) []trace.Attribute {
- return []trace.Attribute{
- trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)),
- }
-}
-
-// TraceStatus is a utility to convert the HTTP status code to a trace.Status that
-// represents the outcome as closely as possible.
-func TraceStatus(httpStatusCode int, statusLine string) trace.Status {
- var code int32
- if httpStatusCode < 200 || httpStatusCode >= 400 {
- code = trace.StatusCodeUnknown
- }
- switch httpStatusCode {
- case 499:
- code = trace.StatusCodeCancelled
- case http.StatusBadRequest:
- code = trace.StatusCodeInvalidArgument
- case http.StatusGatewayTimeout:
- code = trace.StatusCodeDeadlineExceeded
- case http.StatusNotFound:
- code = trace.StatusCodeNotFound
- case http.StatusForbidden:
- code = trace.StatusCodePermissionDenied
- case http.StatusUnauthorized: // 401 is actually unauthenticated.
- code = trace.StatusCodeUnauthenticated
- case http.StatusTooManyRequests:
- code = trace.StatusCodeResourceExhausted
- case http.StatusNotImplemented:
- code = trace.StatusCodeUnimplemented
- case http.StatusServiceUnavailable:
- code = trace.StatusCodeUnavailable
- case http.StatusOK:
- code = trace.StatusCodeOK
- }
- return trace.Status{Code: code, Message: codeToStr[code]}
-}
-
-var codeToStr = map[int32]string{
- trace.StatusCodeOK: `OK`,
- trace.StatusCodeCancelled: `CANCELLED`,
- trace.StatusCodeUnknown: `UNKNOWN`,
- trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`,
- trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`,
- trace.StatusCodeNotFound: `NOT_FOUND`,
- trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`,
- trace.StatusCodePermissionDenied: `PERMISSION_DENIED`,
- trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`,
- trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`,
- trace.StatusCodeAborted: `ABORTED`,
- trace.StatusCodeOutOfRange: `OUT_OF_RANGE`,
- trace.StatusCodeUnimplemented: `UNIMPLEMENTED`,
- trace.StatusCodeInternal: `INTERNAL`,
- trace.StatusCodeUnavailable: `UNAVAILABLE`,
- trace.StatusCodeDataLoss: `DATA_LOSS`,
- trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`,
-}
-
-func isHealthEndpoint(path string) bool {
- // Health checking is pretty frequent and
- // traces collected for health endpoints
- // can be extremely noisy and expensive.
- // Disable canonical health checking endpoints
- // like /healthz and /_ah/health for now.
- if path == "/healthz" || path == "/_ah/health" {
- return true
- }
- return false
-}
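A small, purely illustrative check of the TraceStatus mapping above:

package main

import (
	"fmt"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/trace"
)

func main() {
	st := ochttp.TraceStatus(http.StatusNotFound, "404 Not Found")
	// 404 maps to the specific NOT_FOUND code rather than a generic error code.
	fmt.Println(st.Code == trace.StatusCodeNotFound, st.Message) // true NOT_FOUND
}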
diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go
deleted file mode 100644
index 7a8a62c14..000000000
--- a/vendor/go.opencensus.io/stats/doc.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-/*
-Package stats contains support for OpenCensus stats recording.
-
-OpenCensus allows users to create typed measures, record measurements,
-aggregate the collected data, and export the aggregated data.
-
-Measures
-
-A measure represents a type of metric to be tracked and recorded.
-For example, latency, request Mb/s, and response Mb/s are measures
-to collect from a server.
-
-Each measure needs to be registered before being used. Measure
-constructors such as Int64 and Float64 automatically
-register the measure by the given name. Each registered measure needs
-to be unique by name. Measures also have a description and a unit.
-
-Libraries can define and export measures for their end users to
-create views and collect instrumentation data.
-
-Recording measurements
-
-Measurement is a data point to be collected for a measure. For example,
-for a latency (ms) measure, 100 is a measurement that represents a 100ms
-latency event. Users collect data points on the existing measures with
-the current context. Tags from the current context are recorded with the
-measurements if they are any.
-
-Recorded measurements are dropped immediately if the user is not aggregating
-them via views. Users don't necessarily need to conditionally enable/disable
-recording to reduce cost. Recording of measurements is cheap.
-
-Libraries can always record measurements, and end-users can later decide
-on which measurements they want to collect by registering views. This allows
-libraries to turn on the instrumentation by default.
-*/
-package stats // import "go.opencensus.io/stats"
-
-// TODO(acetechnologist): Add a link to the language independent OpenCensus
-// spec when it is available.
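A minimal sketch tying the package documentation above together: define a typed measure, register a view over it, and record a measurement. The measure/view names, bucket bounds, and the recorded value are illustrative only:

package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

// A typed measure; recording it stays cheap until a view subscribes to it.
var latencyMs = stats.Float64("example.com/measure/latency",
	"request latency", stats.UnitMilliseconds)

func main() {
	// Without a registered view, Record drops the data point immediately.
	if err := view.Register(&view.View{
		Name:        "example.com/views/latency_distribution",
		Description: "distribution of request latency",
		Measure:     latencyMs,
		Aggregation: view.Distribution(0, 10, 50, 100, 500, 1000),
	}); err != nil {
		log.Fatal(err)
	}

	// Tags present in the context, if any, are recorded with the measurement.
	stats.Record(context.Background(), latencyMs.M(123.4))
}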
diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go
deleted file mode 100644
index 6341eb2ad..000000000
--- a/vendor/go.opencensus.io/stats/internal/record.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "go.opencensus.io/tag"
-)
-
-// DefaultRecorder will be called for each Record call.
-var DefaultRecorder func(*tag.Map, interface{})
-
-// SubscriptionReporter reports when a view subscribed with a measure.
-var SubscriptionReporter func(measure string)
diff --git a/vendor/go.opencensus.io/stats/internal/validation.go b/vendor/go.opencensus.io/stats/internal/validation.go
deleted file mode 100644
index b946667f9..000000000
--- a/vendor/go.opencensus.io/stats/internal/validation.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal // import "go.opencensus.io/stats/internal"
-
-const (
- MaxNameLength = 255
-)
-
-func IsPrintable(str string) bool {
- for _, r := range str {
- if !(r >= ' ' && r <= '~') {
- return false
- }
- }
- return true
-}
diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go
deleted file mode 100644
index 7b4b49c67..000000000
--- a/vendor/go.opencensus.io/stats/measure.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package stats
-
-import (
- "sync"
- "sync/atomic"
-)
-
-// Measure represents a single numeric value to be tracked and recorded.
-// For example, latency, request bytes, and response bytes could be measures
-// to collect from a server.
-//
-// Measures by themselves have no outside effects. In order to be exported,
-// the measure needs to be used in a View. If no Views are defined over a
-// measure, there is very little cost in recording it.
-type Measure interface {
- // Name returns the name of this measure.
- //
- // Measure names are globally unique (among all libraries linked into your program).
- // We recommend prefixing the measure name with a domain name relevant to your
- // project or application.
- //
- // Measure names are never sent over the wire or exported to backends.
- // They are only used to create Views.
- Name() string
-
- // Description returns the human-readable description of this measure.
- Description() string
-
- // Unit returns the units for the values this measure takes on.
- //
- // Units are encoded according to the case-sensitive abbreviations from the
- // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
- Unit() string
-}
-
-// measureDescriptor is the untyped descriptor associated with each measure.
-// Int64Measure and Float64Measure wrap measureDescriptor to provide typed
-// recording APIs.
-// Two Measures with the same name will have the same measureDescriptor.
-type measureDescriptor struct {
- subs int32 // access atomically
-
- name string
- description string
- unit string
-}
-
-func (m *measureDescriptor) subscribe() {
- atomic.StoreInt32(&m.subs, 1)
-}
-
-func (m *measureDescriptor) subscribed() bool {
- return atomic.LoadInt32(&m.subs) == 1
-}
-
-var (
- mu sync.RWMutex
- measures = make(map[string]*measureDescriptor)
-)
-
-func registerMeasureHandle(name, desc, unit string) *measureDescriptor {
- mu.Lock()
- defer mu.Unlock()
-
- if stored, ok := measures[name]; ok {
- return stored
- }
- m := &measureDescriptor{
- name: name,
- description: desc,
- unit: unit,
- }
- measures[name] = m
- return m
-}
-
-// Measurement is the numeric value measured when recording stats. Each measure
-// provides methods to create measurements of their kind. For example, Int64Measure
-// provides M to convert an int64 into a measurement.
-type Measurement struct {
- v float64
- m Measure
-}
-
-// Value returns the value of the Measurement as a float64.
-func (m Measurement) Value() float64 {
- return m.v
-}
-
-// Measure returns the Measure from which this Measurement was created.
-func (m Measurement) Measure() Measure {
- return m.m
-}
diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go
deleted file mode 100644
index da4b5a83b..000000000
--- a/vendor/go.opencensus.io/stats/measure_float64.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package stats
-
-// Float64Measure is a measure for float64 values.
-type Float64Measure struct {
- md *measureDescriptor
-}
-
-// Name returns the name of the measure.
-func (m *Float64Measure) Name() string {
- return m.md.name
-}
-
-// Description returns the description of the measure.
-func (m *Float64Measure) Description() string {
- return m.md.description
-}
-
-// Unit returns the unit of the measure.
-func (m *Float64Measure) Unit() string {
- return m.md.unit
-}
-
-// M creates a new float64 measurement.
-// Use Record to record measurements.
-func (m *Float64Measure) M(v float64) Measurement {
- if !m.md.subscribed() {
- return Measurement{}
- }
- return Measurement{m: m, v: v}
-}
-
-// Float64 creates a new measure for float64 values.
-//
-// See the documentation for interface Measure for more guidance on the
-// parameters of this function.
-func Float64(name, description, unit string) *Float64Measure {
- mi := registerMeasureHandle(name, description, unit)
- return &Float64Measure{mi}
-}
diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go
deleted file mode 100644
index 5fedaad05..000000000
--- a/vendor/go.opencensus.io/stats/measure_int64.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package stats
-
-// Int64Measure is a measure for int64 values.
-type Int64Measure struct {
- md *measureDescriptor
-}
-
-// Name returns the name of the measure.
-func (m *Int64Measure) Name() string {
- return m.md.name
-}
-
-// Description returns the description of the measure.
-func (m *Int64Measure) Description() string {
- return m.md.description
-}
-
-// Unit returns the unit of the measure.
-func (m *Int64Measure) Unit() string {
- return m.md.unit
-}
-
-// M creates a new int64 measurement.
-// Use Record to record measurements.
-func (m *Int64Measure) M(v int64) Measurement {
- if !m.md.subscribed() {
- return Measurement{}
- }
- return Measurement{m: m, v: float64(v)}
-}
-
-// Int64 creates a new measure for int64 values.
-//
-// See the documentation for interface Measure for more guidance on the
-// parameters of this function.
-func Int64(name, description, unit string) *Int64Measure {
- mi := registerMeasureHandle(name, description, unit)
- return &Int64Measure{mi}
-}
diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go
deleted file mode 100644
index 98865ff69..000000000
--- a/vendor/go.opencensus.io/stats/record.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package stats
-
-import (
- "context"
-
- "go.opencensus.io/stats/internal"
- "go.opencensus.io/tag"
-)
-
-func init() {
- internal.SubscriptionReporter = func(measure string) {
- mu.Lock()
- measures[measure].subscribe()
- mu.Unlock()
- }
-}
-
-// Record records one or multiple measurements with the same tags at once.
-// If there are any tags in the context, measurements will be tagged with them.
-func Record(ctx context.Context, ms ...Measurement) {
- if len(ms) == 0 {
- return
- }
- var record bool
- for _, m := range ms {
- if (m != Measurement{}) {
- record = true
- break
- }
- }
- if !record {
- return
- }
- if internal.DefaultRecorder != nil {
- internal.DefaultRecorder(tag.FromContext(ctx), ms)
- }
-}
diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go
deleted file mode 100644
index 6931a5f29..000000000
--- a/vendor/go.opencensus.io/stats/units.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package stats
-
-// Units are encoded according to the case-sensitive abbreviations from the
-// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
-const (
- UnitNone = "1" // Deprecated: Use UnitDimensionless.
- UnitDimensionless = "1"
- UnitBytes = "By"
- UnitMilliseconds = "ms"
-)
diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go
deleted file mode 100644
index b7f169b4a..000000000
--- a/vendor/go.opencensus.io/stats/view/aggregation.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-// AggType represents the type of aggregation function used on a View.
-type AggType int
-
-// All available aggregation types.
-const (
- AggTypeNone AggType = iota // no aggregation; reserved for future use.
- AggTypeCount // the count aggregation, see Count.
- AggTypeSum // the sum aggregation, see Sum.
- AggTypeDistribution // the distribution aggregation, see Distribution.
- AggTypeLastValue // the last value aggregation, see LastValue.
-)
-
-func (t AggType) String() string {
- return aggTypeName[t]
-}
-
-var aggTypeName = map[AggType]string{
- AggTypeNone: "None",
- AggTypeCount: "Count",
- AggTypeSum: "Sum",
- AggTypeDistribution: "Distribution",
- AggTypeLastValue: "LastValue",
-}
-
-// Aggregation represents a data aggregation method. Use one of the functions:
-// Count, Sum, or Distribution to construct an Aggregation.
-type Aggregation struct {
- Type AggType // Type is the AggType of this Aggregation.
- Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution.
-
- newData func() AggregationData
-}
-
-var (
- aggCount = &Aggregation{
- Type: AggTypeCount,
- newData: func() AggregationData {
- return &CountData{}
- },
- }
- aggSum = &Aggregation{
- Type: AggTypeSum,
- newData: func() AggregationData {
- return &SumData{}
- },
- }
-)
-
-// Count indicates that data collected and aggregated
-// with this method will be turned into a count value.
-// For example, total number of accepted requests can be
-// aggregated by using Count.
-func Count() *Aggregation {
- return aggCount
-}
-
-// Sum indicates that data collected and aggregated
-// with this method will be summed up.
-// For example, accumulated request bytes can be aggregated by using
-// Sum.
-func Sum() *Aggregation {
- return aggSum
-}
-
-// Distribution indicates that the desired aggregation is
-// a histogram distribution.
-//
-// A distribution aggregation may contain a histogram of the values in the
-// population. The bucket boundaries for that histogram are described
-// by the bounds. This defines len(bounds)+1 buckets.
-//
-// If len(bounds) >= 2 then the boundaries for bucket index i are:
-//
-// [-infinity, bounds[i]) for i = 0
-// [bounds[i-1], bounds[i]) for 0 < i < length
-// [bounds[i-1], +infinity) for i = length
-//
-// If len(bounds) is 0 then there is no histogram associated with the
-// distribution. There will be a single bucket with boundaries
-// (-infinity, +infinity).
-//
-// If len(bounds) is 1 then there are no finite buckets, and that single
-// element is the common boundary of the overflow and underflow buckets.
-func Distribution(bounds ...float64) *Aggregation {
- return &Aggregation{
- Type: AggTypeDistribution,
- Buckets: bounds,
- newData: func() AggregationData {
- return newDistributionData(bounds)
- },
- }
-}
-
-// LastValue only reports the last value recorded using this
-// aggregation. All other measurements will be dropped.
-func LastValue() *Aggregation {
- return &Aggregation{
- Type: AggTypeLastValue,
- newData: func() AggregationData {
- return &LastValueData{}
- },
- }
-}
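To make the Distribution bucket-boundary rules above concrete, a tiny standalone sketch (not part of the library) that reproduces the same indexing rule:

package main

import "fmt"

// bucketIndex mirrors the boundaries documented for Distribution(bounds...):
// v lands in bucket i when bounds[i-1] <= v < bounds[i], with open-ended
// buckets below bounds[0] and at or above bounds[len(bounds)-1].
func bucketIndex(bounds []float64, v float64) int {
	for i, b := range bounds {
		if v < b {
			return i
		}
	}
	return len(bounds)
}

func main() {
	bounds := []float64{0, 100, 200} // 4 buckets: (-inf,0) [0,100) [100,200) [200,+inf)
	for _, v := range []float64{-5, 42, 150, 999} {
		fmt.Printf("value %6.1f -> bucket %d\n", v, bucketIndex(bounds, v))
	}
}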
diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go
deleted file mode 100644
index 88c500bff..000000000
--- a/vendor/go.opencensus.io/stats/view/aggregation_data.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import (
- "math"
-)
-
-// AggregationData represents an aggregated value from a collection.
-// They are reported on the view data during exporting.
-// Most users won't directly access aggregation data.
-type AggregationData interface {
- isAggregationData() bool
- addSample(v float64)
- clone() AggregationData
- equal(other AggregationData) bool
-}
-
-const epsilon = 1e-9
-
-// CountData is the aggregated data for the Count aggregation.
-// A count aggregation processes data and counts the recordings.
-//
-// Most users won't directly access count data.
-type CountData struct {
- Value int64
-}
-
-func (a *CountData) isAggregationData() bool { return true }
-
-func (a *CountData) addSample(v float64) {
- a.Value = a.Value + 1
-}
-
-func (a *CountData) clone() AggregationData {
- return &CountData{Value: a.Value}
-}
-
-func (a *CountData) equal(other AggregationData) bool {
- a2, ok := other.(*CountData)
- if !ok {
- return false
- }
-
- return a.Value == a2.Value
-}
-
-// SumData is the aggregated data for the Sum aggregation.
-// A sum aggregation processes data and sums up the recordings.
-//
-// Most users won't directly access sum data.
-type SumData struct {
- Value float64
-}
-
-func (a *SumData) isAggregationData() bool { return true }
-
-func (a *SumData) addSample(f float64) {
- a.Value += f
-}
-
-func (a *SumData) clone() AggregationData {
- return &SumData{Value: a.Value}
-}
-
-func (a *SumData) equal(other AggregationData) bool {
- a2, ok := other.(*SumData)
- if !ok {
- return false
- }
- return math.Pow(a.Value-a2.Value, 2) < epsilon
-}
-
-// DistributionData is the aggregated data for the
-// Distribution aggregation.
-//
-// Most users won't directly access distribution data.
-type DistributionData struct {
- Count int64 // number of data points aggregated
- Min float64 // minimum value in the distribution
- Max float64 // max value in the distribution
- Mean float64 // mean of the distribution
- SumOfSquaredDev float64 // sum of the squared deviation from the mean
- CountPerBucket []int64 // number of occurrences per bucket
- bounds []float64 // histogram distribution of the values
-}
-
-func newDistributionData(bounds []float64) *DistributionData {
- return &DistributionData{
- CountPerBucket: make([]int64, len(bounds)+1),
- bounds: bounds,
- Min: math.MaxFloat64,
- Max: math.SmallestNonzeroFloat64,
- }
-}
-
-// Sum returns the sum of all samples collected.
-func (a *DistributionData) Sum() float64 { return a.Mean * float64(a.Count) }
-
-func (a *DistributionData) variance() float64 {
- if a.Count <= 1 {
- return 0
- }
- return a.SumOfSquaredDev / float64(a.Count-1)
-}
-
-func (a *DistributionData) isAggregationData() bool { return true }
-
-func (a *DistributionData) addSample(f float64) {
- if f < a.Min {
- a.Min = f
- }
- if f > a.Max {
- a.Max = f
- }
- a.Count++
- a.incrementBucketCount(f)
-
- if a.Count == 1 {
- a.Mean = f
- return
- }
-
- oldMean := a.Mean
- a.Mean = a.Mean + (f-a.Mean)/float64(a.Count)
- a.SumOfSquaredDev = a.SumOfSquaredDev + (f-oldMean)*(f-a.Mean)
-}
-
-func (a *DistributionData) incrementBucketCount(f float64) {
- if len(a.bounds) == 0 {
- a.CountPerBucket[0]++
- return
- }
-
- for i, b := range a.bounds {
- if f < b {
- a.CountPerBucket[i]++
- return
- }
- }
- a.CountPerBucket[len(a.bounds)]++
-}
-
-func (a *DistributionData) clone() AggregationData {
- counts := make([]int64, len(a.CountPerBucket))
- copy(counts, a.CountPerBucket)
- c := *a
- c.CountPerBucket = counts
- return &c
-}
-
-func (a *DistributionData) equal(other AggregationData) bool {
- a2, ok := other.(*DistributionData)
- if !ok {
- return false
- }
- if a2 == nil {
- return false
- }
- if len(a.CountPerBucket) != len(a2.CountPerBucket) {
- return false
- }
- for i := range a.CountPerBucket {
- if a.CountPerBucket[i] != a2.CountPerBucket[i] {
- return false
- }
- }
- return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon
-}
-
-// LastValueData returns the last value recorded for LastValue aggregation.
-type LastValueData struct {
- Value float64
-}
-
-func (l *LastValueData) isAggregationData() bool {
- return true
-}
-
-func (l *LastValueData) addSample(v float64) {
- l.Value = v
-}
-
-func (l *LastValueData) clone() AggregationData {
- return &LastValueData{l.Value}
-}
-
-func (l *LastValueData) equal(other AggregationData) bool {
- a2, ok := other.(*LastValueData)
- if !ok {
- return false
- }
- return l.Value == a2.Value
-}
diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go
deleted file mode 100644
index 250395db2..000000000
--- a/vendor/go.opencensus.io/stats/view/collector.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import (
- "sort"
-
- "go.opencensus.io/internal/tagencoding"
- "go.opencensus.io/tag"
-)
-
-type collector struct {
- // signatures holds the aggregations values for each unique tag signature
- // (values for all keys) to its aggregator.
- signatures map[string]AggregationData
- // Aggregation is the description of the aggregation to perform for this
- // view.
- a *Aggregation
-}
-
-func (c *collector) addSample(s string, v float64) {
- aggregator, ok := c.signatures[s]
- if !ok {
- aggregator = c.a.newData()
- c.signatures[s] = aggregator
- }
- aggregator.addSample(v)
-}
-
-// collectedRows returns a snapshot of the collected Row values.
-func (c *collector) collectedRows(keys []tag.Key) []*Row {
- rows := make([]*Row, 0, len(c.signatures))
- for sig, aggregator := range c.signatures {
- tags := decodeTags([]byte(sig), keys)
- row := &Row{Tags: tags, Data: aggregator.clone()}
- rows = append(rows, row)
- }
- return rows
-}
-
-func (c *collector) clearRows() {
- c.signatures = make(map[string]AggregationData)
-}
-
-// encodeWithKeys encodes the map by using values
-// only associated with the keys provided.
-func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte {
- vb := &tagencoding.Values{
- Buffer: make([]byte, len(keys)),
- }
- for _, k := range keys {
- v, _ := m.Value(k)
- vb.WriteValue([]byte(v))
- }
- return vb.Bytes()
-}
-
-// decodeTags decodes tags from the buffer and
-// orders them by the keys.
-func decodeTags(buf []byte, keys []tag.Key) []tag.Tag {
- vb := &tagencoding.Values{Buffer: buf}
- var tags []tag.Tag
- for _, k := range keys {
- v := vb.ReadValue()
- if v != nil {
- tags = append(tags, tag.Tag{Key: k, Value: string(v)})
- }
- }
- vb.ReadIndex = 0
- sort.Slice(tags, func(i, j int) bool { return tags[i].Key.Name() < tags[j].Key.Name() })
- return tags
-}
diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go
deleted file mode 100644
index 856fb4e15..000000000
--- a/vendor/go.opencensus.io/stats/view/doc.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-/*
-Package view contains support for collecting and exposing aggregates over stats.
-
-In order to collect measurements, views need to be defined and registered.
-A view allows recorded measurements to be filtered and aggregated over a time window.
-
-All recorded measurements can be filtered by a list of tags.
-
-OpenCensus provides several aggregation methods: count, distribution and sum.
-Count aggregation only counts the number of measurement points. Distribution
-aggregation provides a statistical summary of the aggregated data. Sum aggregation
-sums up the measurement points. Aggregations are cumulative.
-
-Users can dynamically create and delete views.
-
-Libraries can export their own views and claim the view names
-by registering them themselves.
-
-Exporting
-
-Collected and aggregated data can be exported to a metric collection
-backend by registering its exporter.
-
-Multiple exporters can be registered to upload the data to various
-different backends. Users need to unregister the exporters once they
-no longer are needed.
-*/
-package view // import "go.opencensus.io/stats/view"
-
-// TODO(acetechnologist): Add a link to the language independent OpenCensus
-// spec when it is available.
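To illustrate the tag-based filtering described above, a short sketch that counts a measure broken down by one tag key. The names are illustrative, and it assumes the usual tag.New/tag.Insert helpers from go.opencensus.io/tag:

package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

var (
	requestCount = stats.Int64("example.com/measure/requests",
		"number of requests", stats.UnitDimensionless)
	keyMethod, _ = tag.NewKey("method")
)

func main() {
	// One Row is produced per distinct value of the "method" tag.
	if err := view.Register(&view.View{
		Name:        "example.com/views/request_count_by_method",
		Measure:     requestCount,
		TagKeys:     []tag.Key{keyMethod},
		Aggregation: view.Count(),
	}); err != nil {
		log.Fatal(err)
	}

	ctx, _ := tag.New(context.Background(), tag.Insert(keyMethod, "GET"))
	stats.Record(ctx, requestCount.M(1))
}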
diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go
deleted file mode 100644
index 7cb59718f..000000000
--- a/vendor/go.opencensus.io/stats/view/export.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package view
-
-import "sync"
-
-var (
- exportersMu sync.RWMutex // guards exporters
- exporters = make(map[Exporter]struct{})
-)
-
-// Exporter exports the collected records as view data.
-//
-// The ExportView method should return quickly; if an
-// Exporter takes a significant amount of time to
-// process a Data, that work should be done on another goroutine.
-//
-// It is safe to assume that ExportView will not be called concurrently from
-// multiple goroutines.
-//
-// The Data should not be modified.
-type Exporter interface {
- ExportView(viewData *Data)
-}
-
-// RegisterExporter registers an exporter.
-// Collected data will be reported via all the
-// registered exporters. Once you no longer
-// want data to be exported, invoke UnregisterExporter
-// with the previously registered exporter.
-//
-// Binaries can register exporters, libraries shouldn't register exporters.
-func RegisterExporter(e Exporter) {
- exportersMu.Lock()
- defer exportersMu.Unlock()
-
- exporters[e] = struct{}{}
-}
-
-// UnregisterExporter unregisters an exporter.
-func UnregisterExporter(e Exporter) {
- exportersMu.Lock()
- defer exportersMu.Unlock()
-
- delete(exporters, e)
-}
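A minimal sketch of a custom exporter satisfying the Exporter interface above; the logging behavior is illustrative and not part of the library:

package main

import (
	"log"
	"time"

	"go.opencensus.io/stats/view"
)

// logExporter satisfies view.Exporter. ExportView should stay fast and hand
// heavy work off to another goroutine, per the interface comment above.
type logExporter struct{}

func (logExporter) ExportView(vd *view.Data) {
	for _, row := range vd.Rows {
		log.Printf("view=%s tags=%v data=%v", vd.View.Name, row.Tags, row.Data)
	}
}

func main() {
	exp := logExporter{}
	view.RegisterExporter(exp)
	defer view.UnregisterExporter(exp)

	// Aggregated Data for every registered view is delivered to ExportView
	// once per reporting period (10s by default; see SetReportingPeriod).
	view.SetReportingPeriod(10 * time.Second)
}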
diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go
deleted file mode 100644
index 22323e2c5..000000000
--- a/vendor/go.opencensus.io/stats/view/view.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import (
- "bytes"
- "fmt"
- "reflect"
- "sort"
- "sync/atomic"
- "time"
-
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/internal"
- "go.opencensus.io/tag"
-)
-
-// View allows users to aggregate the recorded stats.Measurements.
-// Views need to be passed to the Register function before data will be
-// collected and sent to Exporters.
-type View struct {
- Name string // Name of View. Must be unique. If unset, will default to the name of the Measure.
- Description string // Description is a human-readable description for this view.
-
- // TagKeys are the tag keys describing the grouping of this view.
- // A single Row will be produced for each combination of associated tag values.
- TagKeys []tag.Key
-
- // Measure is a stats.Measure to aggregate in this view.
- Measure stats.Measure
-
-	// Aggregation is the aggregation function to apply to the set of Measurements.
- Aggregation *Aggregation
-}
-
-// WithName returns a copy of the View with a new name. This is useful for
-// renaming views to cope with limitations placed on metric names by various
-// backends.
-func (v *View) WithName(name string) *View {
- vNew := *v
- vNew.Name = name
- return &vNew
-}
-
-// same compares two views and returns true if they represent the same aggregation.
-func (v *View) same(other *View) bool {
- if v == other {
- return true
- }
- if v == nil {
- return false
- }
- return reflect.DeepEqual(v.Aggregation, other.Aggregation) &&
- v.Measure.Name() == other.Measure.Name()
-}
-
-// canonicalize canonicalizes v by setting explicit
-// defaults for Name and Description and sorting the TagKeys
-func (v *View) canonicalize() error {
- if v.Measure == nil {
- return fmt.Errorf("cannot register view %q: measure not set", v.Name)
- }
- if v.Aggregation == nil {
- return fmt.Errorf("cannot register view %q: aggregation not set", v.Name)
- }
- if v.Name == "" {
- v.Name = v.Measure.Name()
- }
- if v.Description == "" {
- v.Description = v.Measure.Description()
- }
- if err := checkViewName(v.Name); err != nil {
- return err
- }
- sort.Slice(v.TagKeys, func(i, j int) bool {
- return v.TagKeys[i].Name() < v.TagKeys[j].Name()
- })
- return nil
-}
-
-// viewInternal is the internal representation of a View.
-type viewInternal struct {
- view *View // view is the canonicalized View definition associated with this view.
- subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access
- collector *collector
-}
-
-func newViewInternal(v *View) (*viewInternal, error) {
- return &viewInternal{
- view: v,
- collector: &collector{make(map[string]AggregationData), v.Aggregation},
- }, nil
-}
-
-func (v *viewInternal) subscribe() {
- atomic.StoreUint32(&v.subscribed, 1)
-}
-
-func (v *viewInternal) unsubscribe() {
- atomic.StoreUint32(&v.subscribed, 0)
-}
-
-// isSubscribed returns true if the view is exporting
-// data by subscription.
-func (v *viewInternal) isSubscribed() bool {
- return atomic.LoadUint32(&v.subscribed) == 1
-}
-
-func (v *viewInternal) clearRows() {
- v.collector.clearRows()
-}
-
-func (v *viewInternal) collectedRows() []*Row {
- return v.collector.collectedRows(v.view.TagKeys)
-}
-
-func (v *viewInternal) addSample(m *tag.Map, val float64) {
- if !v.isSubscribed() {
- return
- }
- sig := string(encodeWithKeys(m, v.view.TagKeys))
- v.collector.addSample(sig, val)
-}
-
-// A Data is a set of rows about usage of the single measure associated
-// with the given view. Each row is specific to a unique set of tags.
-type Data struct {
- View *View
- Start, End time.Time
- Rows []*Row
-}
-
-// Row is the collected value for a specific set of key value pairs a.k.a tags.
-type Row struct {
- Tags []tag.Tag
- Data AggregationData
-}
-
-func (r *Row) String() string {
- var buffer bytes.Buffer
- buffer.WriteString("{ ")
- buffer.WriteString("{ ")
- for _, t := range r.Tags {
- buffer.WriteString(fmt.Sprintf("{%v %v}", t.Key.Name(), t.Value))
- }
- buffer.WriteString(" }")
- buffer.WriteString(fmt.Sprintf("%v", r.Data))
- buffer.WriteString(" }")
- return buffer.String()
-}
-
-// Equal returns true if both rows are equal. Tags are expected to be ordered
-// by the key name. Even if both rows have the same tags, Equal returns false
-// when the tags appear in different orders.
-func (r *Row) Equal(other *Row) bool {
- if r == other {
- return true
- }
- return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data)
-}
-
-func checkViewName(name string) error {
- if len(name) > internal.MaxNameLength {
- return fmt.Errorf("view name cannot be larger than %v", internal.MaxNameLength)
- }
- if !internal.IsPrintable(name) {
- return fmt.Errorf("view name needs to be an ASCII string")
- }
- return nil
-}
diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go
deleted file mode 100644
index 9255d27d2..000000000
--- a/vendor/go.opencensus.io/stats/view/worker.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import (
- "fmt"
- "time"
-
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/internal"
- "go.opencensus.io/tag"
-)
-
-func init() {
- defaultWorker = newWorker()
- go defaultWorker.start()
- internal.DefaultRecorder = record
-}
-
-type measureRef struct {
- measure string
- views map[*viewInternal]struct{}
-}
-
-type worker struct {
- measures map[string]*measureRef
- views map[string]*viewInternal
- startTimes map[*viewInternal]time.Time
-
- timer *time.Ticker
- c chan command
- quit, done chan bool
-}
-
-var defaultWorker *worker
-
-var defaultReportingDuration = 10 * time.Second
-
-// Find returns a registered view associated with this name.
-// If no registered view is found, nil is returned.
-func Find(name string) (v *View) {
- req := &getViewByNameReq{
- name: name,
- c: make(chan *getViewByNameResp),
- }
- defaultWorker.c <- req
- resp := <-req.c
- return resp.v
-}
-
-// Register begins collecting data for the given views.
-// Once a view is registered, it reports data to the registered exporters.
-func Register(views ...*View) error {
- for _, v := range views {
- if err := v.canonicalize(); err != nil {
- return err
- }
- }
- req := &registerViewReq{
- views: views,
- err: make(chan error),
- }
- defaultWorker.c <- req
- return <-req.err
-}
-
-// Unregister the given views. Data will no longer be exported for these views
-// after Unregister returns.
-// It is not necessary to unregister from views you expect to collect for the
-// duration of your program execution.
-func Unregister(views ...*View) {
- names := make([]string, len(views))
- for i := range views {
- names[i] = views[i].Name
- }
- req := &unregisterFromViewReq{
- views: names,
- done: make(chan struct{}),
- }
- defaultWorker.c <- req
- <-req.done
-}
-
-// RetrieveData gets a snapshot of the data collected for the view registered
-// with the given name. It is intended for testing only.
-func RetrieveData(viewName string) ([]*Row, error) {
- req := &retrieveDataReq{
- now: time.Now(),
- v: viewName,
- c: make(chan *retrieveDataResp),
- }
- defaultWorker.c <- req
- resp := <-req.c
- return resp.rows, resp.err
-}
-
-func record(tags *tag.Map, ms interface{}) {
- req := &recordReq{
- tm: tags,
- ms: ms.([]stats.Measurement),
- }
- defaultWorker.c <- req
-}
-
-// SetReportingPeriod sets the interval between reporting aggregated views in
-// the program. If duration is less than or equal to zero, it enables the
-// default behavior.
-//
-// Note: each exporter makes different promises about what the lowest supported
-// duration is. For example, the Stackdriver exporter recommends a value no
-// lower than 1 minute. Consult each exporter per your needs.
-func SetReportingPeriod(d time.Duration) {
- // TODO(acetechnologist): ensure that the duration d is more than a certain
- // value. e.g. 1s
- req := &setReportingPeriodReq{
- d: d,
- c: make(chan bool),
- }
- defaultWorker.c <- req
- <-req.c // don't return until the timer is set to the new duration.
-}
-
-func newWorker() *worker {
- return &worker{
- measures: make(map[string]*measureRef),
- views: make(map[string]*viewInternal),
- startTimes: make(map[*viewInternal]time.Time),
- timer: time.NewTicker(defaultReportingDuration),
- c: make(chan command, 1024),
- quit: make(chan bool),
- done: make(chan bool),
- }
-}
-
-func (w *worker) start() {
- for {
- select {
- case cmd := <-w.c:
- cmd.handleCommand(w)
- case <-w.timer.C:
- w.reportUsage(time.Now())
- case <-w.quit:
- w.timer.Stop()
- close(w.c)
- w.done <- true
- return
- }
- }
-}
-
-func (w *worker) stop() {
- w.quit <- true
- <-w.done
-}
-
-func (w *worker) getMeasureRef(name string) *measureRef {
- if mr, ok := w.measures[name]; ok {
- return mr
- }
- mr := &measureRef{
- measure: name,
- views: make(map[*viewInternal]struct{}),
- }
- w.measures[name] = mr
- return mr
-}
-
-func (w *worker) tryRegisterView(v *View) (*viewInternal, error) {
- vi, err := newViewInternal(v)
- if err != nil {
- return nil, err
- }
- if x, ok := w.views[vi.view.Name]; ok {
- if !x.view.same(vi.view) {
- return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name)
- }
-
- // the view is already registered so there is nothing to do and the
- // command is considered successful.
- return x, nil
- }
- w.views[vi.view.Name] = vi
- ref := w.getMeasureRef(vi.view.Measure.Name())
- ref.views[vi] = struct{}{}
- return vi, nil
-}
-
-func (w *worker) reportView(v *viewInternal, now time.Time) {
- if !v.isSubscribed() {
- return
- }
- rows := v.collectedRows()
- _, ok := w.startTimes[v]
- if !ok {
- w.startTimes[v] = now
- }
- viewData := &Data{
- View: v.view,
- Start: w.startTimes[v],
- End: time.Now(),
- Rows: rows,
- }
- exportersMu.Lock()
- for e := range exporters {
- e.ExportView(viewData)
- }
- exportersMu.Unlock()
-}
-
-func (w *worker) reportUsage(now time.Time) {
- for _, v := range w.views {
- w.reportView(v, now)
- }
-}
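An end-to-end sketch of the worker API removed here: register a view, shorten the reporting period, record one measurement, and read the rows back with RetrieveData. The stats.Int64, stats.Record, and view.Count helpers live in other files of this library and are not shown in this diff; names such as reqCount are illustrative.

package main

import (
	"context"
	"log"
	"time"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

var reqCount = stats.Int64("example.com/measures/requests", "number of requests", "1")

func main() {
	v := &view.View{
		Name:        "example.com/views/request_count",
		Description: "number of requests seen",
		Measure:     reqCount,
		Aggregation: view.Count(),
	}
	if err := view.Register(v); err != nil {
		log.Fatal(err)
	}
	defer view.Unregister(v)

	// Report to exporters every 5s instead of the 10s default.
	view.SetReportingPeriod(5 * time.Second)

	stats.Record(context.Background(), reqCount.M(1))

	// RetrieveData is intended for tests, but it shows the collected rows directly.
	rows, err := view.RetrieveData(v.Name)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range rows {
		log.Println(r)
	}
}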
diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go
deleted file mode 100644
index 06c3c5464..000000000
--- a/vendor/go.opencensus.io/stats/view/worker_commands.go
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import (
- "errors"
- "fmt"
- "strings"
- "time"
-
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/internal"
- "go.opencensus.io/tag"
-)
-
-type command interface {
- handleCommand(w *worker)
-}
-
-// getViewByNameReq is the command to get a view given its name.
-type getViewByNameReq struct {
- name string
- c chan *getViewByNameResp
-}
-
-type getViewByNameResp struct {
- v *View
-}
-
-func (cmd *getViewByNameReq) handleCommand(w *worker) {
- v := w.views[cmd.name]
- if v == nil {
- cmd.c <- &getViewByNameResp{nil}
- return
- }
- cmd.c <- &getViewByNameResp{v.view}
-}
-
-// registerViewReq is the command to register a view.
-type registerViewReq struct {
- views []*View
- err chan error
-}
-
-func (cmd *registerViewReq) handleCommand(w *worker) {
- var errstr []string
- for _, view := range cmd.views {
- vi, err := w.tryRegisterView(view)
- if err != nil {
- errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err))
- continue
- }
- internal.SubscriptionReporter(view.Measure.Name())
- vi.subscribe()
- }
- if len(errstr) > 0 {
- cmd.err <- errors.New(strings.Join(errstr, "\n"))
- } else {
- cmd.err <- nil
- }
-}
-
-// unregisterFromViewReq is the command to unregister from a view. It has no
-// impact on the data collection for clients that are pulling data from the
-// library.
-type unregisterFromViewReq struct {
- views []string
- done chan struct{}
-}
-
-func (cmd *unregisterFromViewReq) handleCommand(w *worker) {
- for _, name := range cmd.views {
- vi, ok := w.views[name]
- if !ok {
- continue
- }
-
- // Report pending data for this view before removing it.
- w.reportView(vi, time.Now())
-
- vi.unsubscribe()
- if !vi.isSubscribed() {
- // this was the last subscription and view is not collecting anymore.
- // The collected data can be cleared.
- vi.clearRows()
- }
- delete(w.views, name)
- }
- cmd.done <- struct{}{}
-}
-
-// retrieveDataReq is the command to retrieve data for a view.
-type retrieveDataReq struct {
- now time.Time
- v string
- c chan *retrieveDataResp
-}
-
-type retrieveDataResp struct {
- rows []*Row
- err error
-}
-
-func (cmd *retrieveDataReq) handleCommand(w *worker) {
- vi, ok := w.views[cmd.v]
- if !ok {
- cmd.c <- &retrieveDataResp{
- nil,
- fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v),
- }
- return
- }
-
- if !vi.isSubscribed() {
- cmd.c <- &retrieveDataResp{
- nil,
- fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v),
- }
- return
- }
- cmd.c <- &retrieveDataResp{
- vi.collectedRows(),
- nil,
- }
-}
-
-// recordReq is the command to record data related to multiple measures
-// at once.
-type recordReq struct {
- tm *tag.Map
- ms []stats.Measurement
-}
-
-func (cmd *recordReq) handleCommand(w *worker) {
- for _, m := range cmd.ms {
- if (m == stats.Measurement{}) { // not registered
- continue
- }
- ref := w.getMeasureRef(m.Measure().Name())
- for v := range ref.views {
- v.addSample(cmd.tm, m.Value())
- }
- }
-}
-
-// setReportingPeriodReq is the command to modify the duration between
-// reporting the collected data to the registered clients.
-type setReportingPeriodReq struct {
- d time.Duration
- c chan bool
-}
-
-func (cmd *setReportingPeriodReq) handleCommand(w *worker) {
- w.timer.Stop()
- if cmd.d <= 0 {
- w.timer = time.NewTicker(defaultReportingDuration)
- } else {
- w.timer = time.NewTicker(cmd.d)
- }
- cmd.c <- true
-}
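The commands above are all serialized through the single worker goroutine, which is why the maps owned by worker need no locking. A stripped-down sketch of that command-channel pattern, independent of OpenCensus:

package main

import "fmt"

// command mirrors the handleCommand pattern above: each request knows how to
// apply itself to the worker-owned state.
type command interface {
	handle(state map[string]int)
}

type setReq struct {
	key  string
	val  int
	done chan struct{}
}

func (c setReq) handle(state map[string]int) {
	state[c.key] = c.val
	close(c.done)
}

type getReq struct {
	key  string
	resp chan int
}

func (c getReq) handle(state map[string]int) {
	c.resp <- state[c.key]
}

func main() {
	cmds := make(chan command, 16)

	// The "worker": sole owner of state, so no mutex is needed.
	go func() {
		state := make(map[string]int)
		for cmd := range cmds {
			cmd.handle(state)
		}
	}()

	done := make(chan struct{})
	cmds <- setReq{key: "requests", val: 42, done: done}
	<-done

	resp := make(chan int)
	cmds <- getReq{key: "requests", resp: resp}
	fmt.Println(<-resp) // 42
}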
diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go
deleted file mode 100644
index ed528bcb3..000000000
--- a/vendor/go.opencensus.io/tag/context.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package tag
-
-import "context"
-
-// FromContext returns the tag map stored in the context.
-func FromContext(ctx context.Context) *Map {
- // The returned tag map shouldn't be mutated.
- ts := ctx.Value(mapCtxKey)
- if ts == nil {
- return nil
- }
- return ts.(*Map)
-}
-
-// NewContext creates a new context with the given tag map.
-// To propagate a tag map to downstream methods and downstream RPCs, add a tag map
-// to the current context. NewContext will return a copy of the current context,
-// and put the tag map into the returned one.
-// If there is already a tag map in the current context, it will be replaced with m.
-func NewContext(ctx context.Context, m *Map) context.Context {
- return context.WithValue(ctx, mapCtxKey, m)
-}
-
-type ctxKey struct{}
-
-var mapCtxKey = ctxKey{}
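A small usage sketch for the context helpers deleted above, together with tag.New and tag.NewKey from the sibling files of this package; the "method" key is illustrative.

package main

import (
	"context"
	"fmt"
	"log"

	"go.opencensus.io/tag"
)

func main() {
	keyMethod, err := tag.NewKey("method")
	if err != nil {
		log.Fatal(err)
	}

	// tag.New builds a tag map and stores it in the returned context
	// (it calls NewContext internally).
	ctx, err := tag.New(context.Background(), tag.Insert(keyMethod, "GET"))
	if err != nil {
		log.Fatal(err)
	}

	handle(ctx, keyMethod)
}

func handle(ctx context.Context, k tag.Key) {
	// Downstream code reads the map back out of the context.
	if v, ok := tag.FromContext(ctx).Value(k); ok {
		fmt.Println("method =", v) // method = GET
	}
}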
diff --git a/vendor/go.opencensus.io/tag/doc.go b/vendor/go.opencensus.io/tag/doc.go
deleted file mode 100644
index da16b74e4..000000000
--- a/vendor/go.opencensus.io/tag/doc.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-/*
-Package tag contains OpenCensus tags.
-
-Tags are key-value pairs. Tags provide additional cardinality to
-the OpenCensus instrumentation data.
-
-Tags can be propagated on the wire and in the same
-process via context.Context. Encode and Decode should be
-used to convert tags to and from their binary propagation form.
-*/
-package tag // import "go.opencensus.io/tag"
diff --git a/vendor/go.opencensus.io/tag/key.go b/vendor/go.opencensus.io/tag/key.go
deleted file mode 100644
index ebbed9500..000000000
--- a/vendor/go.opencensus.io/tag/key.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package tag
-
-// Key represents a tag key.
-type Key struct {
- name string
-}
-
-// NewKey creates or retrieves a string key identified by name.
-// Calling NewKey repeatedly with the same name returns the same key.
-func NewKey(name string) (Key, error) {
- if !checkKeyName(name) {
- return Key{}, errInvalidKeyName
- }
- return Key{name: name}, nil
-}
-
-// Name returns the name of the key.
-func (k Key) Name() string {
- return k.name
-}
diff --git a/vendor/go.opencensus.io/tag/map.go b/vendor/go.opencensus.io/tag/map.go
deleted file mode 100644
index 5b72ba6ad..000000000
--- a/vendor/go.opencensus.io/tag/map.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package tag
-
-import (
- "bytes"
- "context"
- "fmt"
- "sort"
-)
-
-// Tag is a key value pair that can be propagated on wire.
-type Tag struct {
- Key Key
- Value string
-}
-
-// Map is a map of tags. Use New to create a context containing
-// a new Map.
-type Map struct {
- m map[Key]string
-}
-
-// Value returns the value for the key if a value for the key exists.
-func (m *Map) Value(k Key) (string, bool) {
- if m == nil {
- return "", false
- }
- v, ok := m.m[k]
- return v, ok
-}
-
-func (m *Map) String() string {
- if m == nil {
- return "nil"
- }
- keys := make([]Key, 0, len(m.m))
- for k := range m.m {
- keys = append(keys, k)
- }
- sort.Slice(keys, func(i, j int) bool { return keys[i].Name() < keys[j].Name() })
-
- var buffer bytes.Buffer
- buffer.WriteString("{ ")
- for _, k := range keys {
- buffer.WriteString(fmt.Sprintf("{%v %v}", k.name, m.m[k]))
- }
- buffer.WriteString(" }")
- return buffer.String()
-}
-
-func (m *Map) insert(k Key, v string) {
- if _, ok := m.m[k]; ok {
- return
- }
- m.m[k] = v
-}
-
-func (m *Map) update(k Key, v string) {
- if _, ok := m.m[k]; ok {
- m.m[k] = v
- }
-}
-
-func (m *Map) upsert(k Key, v string) {
- m.m[k] = v
-}
-
-func (m *Map) delete(k Key) {
- delete(m.m, k)
-}
-
-func newMap() *Map {
- return &Map{m: make(map[Key]string)}
-}
-
-// Mutator modifies a tag map.
-type Mutator interface {
- Mutate(t *Map) (*Map, error)
-}
-
-// Insert returns a mutator that inserts a
-// value associated with k. If k already exists in the tag map,
-// the mutator doesn't update the value.
-func Insert(k Key, v string) Mutator {
- return &mutator{
- fn: func(m *Map) (*Map, error) {
- if !checkValue(v) {
- return nil, errInvalidValue
- }
- m.insert(k, v)
- return m, nil
- },
- }
-}
-
-// Update returns a mutator that updates the
-// value of the tag associated with k with v. If k doesn't
-// exist in the tag map, the mutator doesn't insert the value.
-func Update(k Key, v string) Mutator {
- return &mutator{
- fn: func(m *Map) (*Map, error) {
- if !checkValue(v) {
- return nil, errInvalidValue
- }
- m.update(k, v)
- return m, nil
- },
- }
-}
-
-// Upsert returns a mutator that upserts the
-// value of the tag associated with k with v. It inserts the
-// value if k doesn't exist already. It mutates the value
-// if k already exists.
-func Upsert(k Key, v string) Mutator {
- return &mutator{
- fn: func(m *Map) (*Map, error) {
- if !checkValue(v) {
- return nil, errInvalidValue
- }
- m.upsert(k, v)
- return m, nil
- },
- }
-}
-
-// Delete returns a mutator that deletes
-// the value associated with k.
-func Delete(k Key) Mutator {
- return &mutator{
- fn: func(m *Map) (*Map, error) {
- m.delete(k)
- return m, nil
- },
- }
-}
-
-// New returns a new context that contains a tag map
-// originated from the incoming context and modified
-// with the provided mutators.
-func New(ctx context.Context, mutator ...Mutator) (context.Context, error) {
- m := newMap()
- orig := FromContext(ctx)
- if orig != nil {
- for k, v := range orig.m {
- if !checkKeyName(k.Name()) {
- return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName)
- }
- if !checkValue(v) {
- return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue)
- }
- m.insert(k, v)
- }
- }
- var err error
- for _, mod := range mutator {
- m, err = mod.Mutate(m)
- if err != nil {
- return ctx, err
- }
- }
- return NewContext(ctx, m), nil
-}
-
-// Do is similar to pprof.Do: a convenience for installing the tags
-// from the context as Go profiler labels. This allows you to
-// correlate runtime profiling with stats.
-//
-// It converts the key/values from the given map to Go profiler labels
-// and calls pprof.Do.
-//
-// Do does nothing if your Go version is below 1.9.
-func Do(ctx context.Context, f func(ctx context.Context)) {
- do(ctx, f)
-}
-
-type mutator struct {
- fn func(t *Map) (*Map, error)
-}
-
-func (m *mutator) Mutate(t *Map) (*Map, error) {
- return m.fn(t)
-}
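A sketch of the mutator API above: build a tag map with New, derive a modified copy for a child operation, then run work under tag.Do so the tags double as pprof labels. Keys and values here are illustrative.

package main

import (
	"context"
	"fmt"
	"log"

	"go.opencensus.io/tag"
)

func main() {
	keyUser, _ := tag.NewKey("user") // errors ignored for brevity
	keyOp, _ := tag.NewKey("op")

	ctx, err := tag.New(context.Background(),
		tag.Insert(keyUser, "alice"), // set only if absent
		tag.Upsert(keyOp, "read"),    // set unconditionally
	)
	if err != nil {
		log.Fatal(err)
	}

	// A child operation overwrites op and drops user.
	ctx, err = tag.New(ctx,
		tag.Update(keyOp, "write"), // set only if already present
		tag.Delete(keyUser),
	)
	if err != nil {
		log.Fatal(err)
	}

	tag.Do(ctx, func(ctx context.Context) {
		fmt.Println(tag.FromContext(ctx)) // { {op write} }
	})
}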
diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go
deleted file mode 100644
index 3e998950c..000000000
--- a/vendor/go.opencensus.io/tag/map_codec.go
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package tag
-
-import (
- "encoding/binary"
- "fmt"
-)
-
-// keyType defines the types of keys allowed. Currently only keyTypeString is
-// supported.
-type keyType byte
-
-const (
- keyTypeString keyType = iota
- keyTypeInt64
- keyTypeTrue
- keyTypeFalse
-
- tagsVersionID = byte(0)
-)
-
-type encoderGRPC struct {
- buf []byte
- writeIdx, readIdx int
-}
-
-// writeTagString writes the keyTypeString byte followed by the key string and
-// the value string.
-func (eg *encoderGRPC) writeTagString(k, v string) {
- eg.writeByte(byte(keyTypeString))
- eg.writeStringWithVarintLen(k)
- eg.writeStringWithVarintLen(v)
-}
-
-func (eg *encoderGRPC) writeTagUint64(k string, i uint64) {
- eg.writeByte(byte(keyTypeInt64))
- eg.writeStringWithVarintLen(k)
- eg.writeUint64(i)
-}
-
-func (eg *encoderGRPC) writeTagTrue(k string) {
- eg.writeByte(byte(keyTypeTrue))
- eg.writeStringWithVarintLen(k)
-}
-
-func (eg *encoderGRPC) writeTagFalse(k string) {
- eg.writeByte(byte(keyTypeFalse))
- eg.writeStringWithVarintLen(k)
-}
-
-func (eg *encoderGRPC) writeBytesWithVarintLen(bytes []byte) {
- length := len(bytes)
-
- eg.growIfRequired(binary.MaxVarintLen64 + length)
- eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length))
- copy(eg.buf[eg.writeIdx:], bytes)
- eg.writeIdx += length
-}
-
-func (eg *encoderGRPC) writeStringWithVarintLen(s string) {
- length := len(s)
-
- eg.growIfRequired(binary.MaxVarintLen64 + length)
- eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length))
- copy(eg.buf[eg.writeIdx:], s)
- eg.writeIdx += length
-}
-
-func (eg *encoderGRPC) writeByte(v byte) {
- eg.growIfRequired(1)
- eg.buf[eg.writeIdx] = v
- eg.writeIdx++
-}
-
-func (eg *encoderGRPC) writeUint32(i uint32) {
- eg.growIfRequired(4)
- binary.LittleEndian.PutUint32(eg.buf[eg.writeIdx:], i)
- eg.writeIdx += 4
-}
-
-func (eg *encoderGRPC) writeUint64(i uint64) {
- eg.growIfRequired(8)
- binary.LittleEndian.PutUint64(eg.buf[eg.writeIdx:], i)
- eg.writeIdx += 8
-}
-
-func (eg *encoderGRPC) readByte() byte {
- b := eg.buf[eg.readIdx]
- eg.readIdx++
- return b
-}
-
-func (eg *encoderGRPC) readUint32() uint32 {
- i := binary.LittleEndian.Uint32(eg.buf[eg.readIdx:])
- eg.readIdx += 4
- return i
-}
-
-func (eg *encoderGRPC) readUint64() uint64 {
- i := binary.LittleEndian.Uint64(eg.buf[eg.readIdx:])
- eg.readIdx += 8
- return i
-}
-
-func (eg *encoderGRPC) readBytesWithVarintLen() ([]byte, error) {
- if eg.readEnded() {
- return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx)
- }
- length, valueStart := binary.Uvarint(eg.buf[eg.readIdx:])
- if valueStart <= 0 {
- return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx)
- }
-
- valueStart += eg.readIdx
- valueEnd := valueStart + int(length)
- if valueEnd > len(eg.buf) {
- return nil, fmt.Errorf("malformed encoding: length:%v, upper:%v, maxLength:%v", length, valueEnd, len(eg.buf))
- }
-
- eg.readIdx = valueEnd
- return eg.buf[valueStart:valueEnd], nil
-}
-
-func (eg *encoderGRPC) readStringWithVarintLen() (string, error) {
- bytes, err := eg.readBytesWithVarintLen()
- if err != nil {
- return "", err
- }
- return string(bytes), nil
-}
-
-func (eg *encoderGRPC) growIfRequired(expected int) {
- if len(eg.buf)-eg.writeIdx < expected {
- tmp := make([]byte, 2*(len(eg.buf)+1)+expected)
- copy(tmp, eg.buf)
- eg.buf = tmp
- }
-}
-
-func (eg *encoderGRPC) readEnded() bool {
- return eg.readIdx >= len(eg.buf)
-}
-
-func (eg *encoderGRPC) bytes() []byte {
- return eg.buf[:eg.writeIdx]
-}
-
-// Encode encodes the tag map into a []byte. It is useful to propagate
-// the tag maps on wire in binary format.
-func Encode(m *Map) []byte {
- eg := &encoderGRPC{
- buf: make([]byte, len(m.m)),
- }
- eg.writeByte(byte(tagsVersionID))
- for k, v := range m.m {
- eg.writeByte(byte(keyTypeString))
- eg.writeStringWithVarintLen(k.name)
- eg.writeBytesWithVarintLen([]byte(v))
- }
- return eg.bytes()
-}
-
-// Decode decodes the given []byte into a tag map.
-func Decode(bytes []byte) (*Map, error) {
- ts := newMap()
- err := DecodeEach(bytes, ts.upsert)
- if err != nil {
- // no partial failures
- return nil, err
- }
- return ts, nil
-}
-
-// DecodeEach decodes the given serialized tag map, calling handler for each
-// tag key and value decoded.
-func DecodeEach(bytes []byte, fn func(key Key, val string)) error {
- eg := &encoderGRPC{
- buf: bytes,
- }
- if len(eg.buf) == 0 {
- return nil
- }
-
- version := eg.readByte()
- if version > tagsVersionID {
- return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID)
- }
-
- for !eg.readEnded() {
- typ := keyType(eg.readByte())
-
- if typ != keyTypeString {
- return fmt.Errorf("cannot decode: invalid key type: %q", typ)
- }
-
- k, err := eg.readBytesWithVarintLen()
- if err != nil {
- return err
- }
-
- v, err := eg.readBytesWithVarintLen()
- if err != nil {
- return err
- }
-
- key, err := NewKey(string(k))
- if err != nil {
- return err
- }
- val := string(v)
- if !checkValue(val) {
- return errInvalidValue
- }
- fn(key, val)
- if err != nil {
- return err
- }
- }
- return nil
-}
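Round-tripping a tag map through the binary wire format implemented above, e.g. for carrying tags in a message header; the key and value are illustrative.

package main

import (
	"context"
	"fmt"
	"log"

	"go.opencensus.io/tag"
)

func main() {
	k, err := tag.NewKey("frontend")
	if err != nil {
		log.Fatal(err)
	}
	ctx, err := tag.New(context.Background(), tag.Upsert(k, "mobile-ios"))
	if err != nil {
		log.Fatal(err)
	}

	// Sender: serialize the map into its binary propagation form.
	wire := tag.Encode(tag.FromContext(ctx))

	// Receiver: decode it back into a *tag.Map.
	m, err := tag.Decode(wire)
	if err != nil {
		log.Fatal(err)
	}
	if v, ok := m.Value(k); ok {
		fmt.Println("frontend =", v) // frontend = mobile-ios
	}
}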
diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go
deleted file mode 100644
index f81cd0b4a..000000000
--- a/vendor/go.opencensus.io/tag/profile_19.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build go1.9
-
-package tag
-
-import (
- "context"
- "runtime/pprof"
-)
-
-func do(ctx context.Context, f func(ctx context.Context)) {
- m := FromContext(ctx)
- keyvals := make([]string, 0, 2*len(m.m))
- for k, v := range m.m {
- keyvals = append(keyvals, k.Name(), v)
- }
- pprof.Do(ctx, pprof.Labels(keyvals...), f)
-}
diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go
deleted file mode 100644
index 83adbce56..000000000
--- a/vendor/go.opencensus.io/tag/profile_not19.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !go1.9
-
-package tag
-
-import "context"
-
-func do(ctx context.Context, f func(ctx context.Context)) {
- f(ctx)
-}
diff --git a/vendor/go.opencensus.io/tag/validate.go b/vendor/go.opencensus.io/tag/validate.go
deleted file mode 100644
index 0939fc674..000000000
--- a/vendor/go.opencensus.io/tag/validate.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tag
-
-import "errors"
-
-const (
- maxKeyLength = 255
-
- // valid characters are restricted to the US-ASCII subset (range 0x20 (' ') to 0x7e ('~')).
- validKeyValueMin = 32
- validKeyValueMax = 126
-)
-
-var (
- errInvalidKeyName = errors.New("invalid key name: only ASCII characters accepted; max length must be 255 characters")
- errInvalidValue = errors.New("invalid value: only ASCII characters accepted; max length must be 255 characters")
-)
-
-func checkKeyName(name string) bool {
- if len(name) == 0 {
- return false
- }
- if len(name) > maxKeyLength {
- return false
- }
- return isASCII(name)
-}
-
-func isASCII(s string) bool {
- for _, c := range s {
- if (c < validKeyValueMin) || (c > validKeyValueMax) {
- return false
- }
- }
- return true
-}
-
-func checkValue(v string) bool {
- if len(v) > maxKeyLength {
- return false
- }
- return isASCII(v)
-}
diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go
deleted file mode 100644
index 01f0f9083..000000000
--- a/vendor/go.opencensus.io/trace/basetypes.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "fmt"
- "time"
-)
-
-type (
- // TraceID is a 16-byte identifier for a set of spans.
- TraceID [16]byte
-
- // SpanID is an 8-byte identifier for a single span.
- SpanID [8]byte
-)
-
-func (t TraceID) String() string {
- return fmt.Sprintf("%02x", t[:])
-}
-
-func (s SpanID) String() string {
- return fmt.Sprintf("%02x", s[:])
-}
-
-// Annotation represents a text annotation with a set of attributes and a timestamp.
-type Annotation struct {
- Time time.Time
- Message string
- Attributes map[string]interface{}
-}
-
-// Attribute represents a key-value pair on a span, link or annotation.
-// Construct with one of: BoolAttribute, Int64Attribute, or StringAttribute.
-type Attribute struct {
- key string
- value interface{}
-}
-
-// BoolAttribute returns a bool-valued attribute.
-func BoolAttribute(key string, value bool) Attribute {
- return Attribute{key: key, value: value}
-}
-
-// Int64Attribute returns an int64-valued attribute.
-func Int64Attribute(key string, value int64) Attribute {
- return Attribute{key: key, value: value}
-}
-
-// StringAttribute returns a string-valued attribute.
-func StringAttribute(key string, value string) Attribute {
- return Attribute{key: key, value: value}
-}
-
-// LinkType specifies the relationship between the span that had the link
-// added, and the linked span.
-type LinkType int32
-
-// LinkType values.
-const (
- LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown.
- LinkTypeChild // The current span is a child of the linked span.
- LinkTypeParent // The current span is the parent of the linked span.
-)
-
-// Link represents a reference from one span to another span.
-type Link struct {
- TraceID TraceID
- SpanID SpanID
- Type LinkType
- // Attributes is a set of attributes on the link.
- Attributes map[string]interface{}
-}
-
-// MessageEventType specifies the type of message event.
-type MessageEventType int32
-
-// MessageEventType values.
-const (
- MessageEventTypeUnspecified MessageEventType = iota // Unknown event type.
- MessageEventTypeSent // Indicates a sent RPC message.
- MessageEventTypeRecv // Indicates a received RPC message.
-)
-
-// MessageEvent represents an event describing a message sent or received on the network.
-type MessageEvent struct {
- Time time.Time
- EventType MessageEventType
- MessageID int64
- UncompressedByteSize int64
- CompressedByteSize int64
-}
-
-// Status is the status of a Span.
-type Status struct {
- // Code is a status code. Zero indicates success.
- //
- // If Code will be propagated to Google APIs, it ideally should be a value from
- // https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto .
- Code int32
- Message string
-}
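A sketch of attaching the Attribute constructors above to a span; it assumes trace.StartSpan and Span.AddAttributes from trace.go (removed further down in this diff). Function and attribute names are illustrative.

package main

import (
	"context"

	"go.opencensus.io/trace"
)

func handleRequest(ctx context.Context, userID string, retries int64, cacheHit bool) {
	ctx, span := trace.StartSpan(ctx, "example.com/handleRequest")
	defer span.End()

	// Attributes end up in SpanData.Attributes as string/int64/bool values.
	span.AddAttributes(
		trace.StringAttribute("user_id", userID),
		trace.Int64Attribute("retries", retries),
		trace.BoolAttribute("cache_hit", cacheHit),
	)

	_ = ctx // real work would use ctx here
}

func main() {
	handleRequest(context.Background(), "u-123", 2, true)
}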
diff --git a/vendor/go.opencensus.io/trace/config.go b/vendor/go.opencensus.io/trace/config.go
deleted file mode 100644
index 0816892ea..000000000
--- a/vendor/go.opencensus.io/trace/config.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "sync"
-
- "go.opencensus.io/trace/internal"
-)
-
-// Config represents the global tracing configuration.
-type Config struct {
- // DefaultSampler is the default sampler used when creating new spans.
- DefaultSampler Sampler
-
- // IDGenerator is for internal use only.
- IDGenerator internal.IDGenerator
-}
-
-var configWriteMu sync.Mutex
-
-// ApplyConfig applies changes to the global tracing configuration.
-//
-// Fields not provided in the given config are preserved.
-func ApplyConfig(cfg Config) {
- configWriteMu.Lock()
- defer configWriteMu.Unlock()
- c := *config.Load().(*Config)
- if cfg.DefaultSampler != nil {
- c.DefaultSampler = cfg.DefaultSampler
- }
- if cfg.IDGenerator != nil {
- c.IDGenerator = cfg.IDGenerator
- }
- config.Store(&c)
-}
diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go
deleted file mode 100644
index 04b1ee4f3..000000000
--- a/vendor/go.opencensus.io/trace/doc.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package trace contains support for OpenCensus distributed tracing.
-
-The following assumes a basic familiarity with OpenCensus concepts.
-See http://opencensus.io
-
-
-Exporting Traces
-
-To export collected tracing data, register at least one exporter. You can use
-one of the provided exporters or write your own.
-
- trace.RegisterExporter(exporter)
-
-By default, traces will be sampled relatively rarely. To change the sampling
-frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler
-to sample a subset of traces, or use AlwaysSample to collect a trace on every run:
-
- trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
-
-Be careful about using trace.AlwaysSample in a production application with
-significant traffic: a new trace will be started and exported for every request.
-
-Adding Spans to a Trace
-
-A trace consists of a tree of spans. In Go, the current span is carried in a
-context.Context.
-
-It is common to want to capture all the activity of a function call in a span. For
-this to work, the function must take a context.Context as a parameter. Add these two
-lines to the top of the function:
-
- ctx, span := trace.StartSpan(ctx, "example.com/Run")
- defer span.End()
-
-StartSpan will create a new top-level span if the context
-doesn't contain another span, otherwise it will create a child span.
-*/
-package trace // import "go.opencensus.io/trace"
diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go
deleted file mode 100644
index 77a8c7357..000000000
--- a/vendor/go.opencensus.io/trace/export.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "sync"
- "sync/atomic"
- "time"
-)
-
-// Exporter is a type for functions that receive sampled trace spans.
-//
-// The ExportSpan method should be safe for concurrent use and should return
-// quickly; if an Exporter takes a significant amount of time to process a
-// SpanData, that work should be done on another goroutine.
-//
-// The SpanData should not be modified, but a pointer to it can be kept.
-type Exporter interface {
- ExportSpan(s *SpanData)
-}
-
-type exportersMap map[Exporter]struct{}
-
-var (
- exporterMu sync.Mutex
- exporters atomic.Value
-)
-
-// RegisterExporter adds to the list of Exporters that will receive sampled
-// trace spans.
-//
-// Binaries can register exporters, libraries shouldn't register exporters.
-func RegisterExporter(e Exporter) {
- exporterMu.Lock()
- new := make(exportersMap)
- if old, ok := exporters.Load().(exportersMap); ok {
- for k, v := range old {
- new[k] = v
- }
- }
- new[e] = struct{}{}
- exporters.Store(new)
- exporterMu.Unlock()
-}
-
-// UnregisterExporter removes the given Exporter from the list of Exporters
-// that receive sampled trace spans.
-func UnregisterExporter(e Exporter) {
- exporterMu.Lock()
- new := make(exportersMap)
- if old, ok := exporters.Load().(exportersMap); ok {
- for k, v := range old {
- new[k] = v
- }
- }
- delete(new, e)
- exporters.Store(new)
- exporterMu.Unlock()
-}
-
-// SpanData contains all the information collected by a Span.
-type SpanData struct {
- SpanContext
- ParentSpanID SpanID
- SpanKind int
- Name string
- StartTime time.Time
- // The wall clock time of EndTime will be adjusted to always be offset
- // from StartTime by the duration of the span.
- EndTime time.Time
- // The values of Attributes each have type string, bool, or int64.
- Attributes map[string]interface{}
- Annotations []Annotation
- MessageEvents []MessageEvent
- Status
- Links []Link
- HasRemoteParent bool
-}
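A minimal implementation of the Exporter interface above: print a one-line summary of each sampled span. A real exporter would hand the SpanData off to another goroutine if processing is slow; printExporter is illustrative.

package main

import (
	"context"
	"log"
	"time"

	"go.opencensus.io/trace"
)

type printExporter struct{}

func (printExporter) ExportSpan(sd *trace.SpanData) {
	log.Printf("span %q trace=%s span=%s took %s status=%d",
		sd.Name, sd.TraceID, sd.SpanID, sd.EndTime.Sub(sd.StartTime), sd.Status.Code)
}

func main() {
	trace.RegisterExporter(printExporter{})
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})

	_, span := trace.StartSpan(context.Background(), "example.com/Demo")
	time.Sleep(10 * time.Millisecond)
	span.End() // exporters are invoked when the sampled span ends
}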
diff --git a/vendor/go.opencensus.io/trace/internal/internal.go b/vendor/go.opencensus.io/trace/internal/internal.go
deleted file mode 100644
index 1c8b9b34b..000000000
--- a/vendor/go.opencensus.io/trace/internal/internal.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package internal provides trace internals.
-package internal
-
-type IDGenerator interface {
- NewTraceID() [16]byte
- NewSpanID() [8]byte
-}
diff --git a/vendor/go.opencensus.io/trace/propagation/propagation.go b/vendor/go.opencensus.io/trace/propagation/propagation.go
deleted file mode 100644
index 1eb190a96..000000000
--- a/vendor/go.opencensus.io/trace/propagation/propagation.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package propagation implements the binary trace context format.
-package propagation // import "go.opencensus.io/trace/propagation"
-
-// TODO: link to external spec document.
-
-// BinaryFormat format:
-//
-// Binary value:
-// version_id: 1 byte representing the version id.
-//
-// For version_id = 0:
-//
-// version_format:
-// field_format:
-//
-// Fields:
-//
-// TraceId: (field_id = 0, len = 16, default = "0000000000000000") - 16-byte array representing the trace_id.
-// SpanId: (field_id = 1, len = 8, default = "00000000") - 8-byte array representing the span_id.
-// TraceOptions: (field_id = 2, len = 1, default = "0") - 1-byte array representing the trace_options.
-//
-// Fields MUST be encoded using the field id order (smaller to higher).
-//
-// Valid value example:
-//
-// {0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97,
-// 98, 99, 100, 101, 102, 103, 104, 2, 1}
-//
-// version_id = 0;
-// trace_id = {64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79}
-// span_id = {97, 98, 99, 100, 101, 102, 103, 104};
-// trace_options = {1};
-
-import (
- "net/http"
-
- "go.opencensus.io/trace"
-)
-
-// Binary returns the binary format representation of a SpanContext.
-//
-// If sc is the zero value, Binary returns nil.
-func Binary(sc trace.SpanContext) []byte {
- if sc == (trace.SpanContext{}) {
- return nil
- }
- var b [29]byte
- copy(b[2:18], sc.TraceID[:])
- b[18] = 1
- copy(b[19:27], sc.SpanID[:])
- b[27] = 2
- b[28] = uint8(sc.TraceOptions)
- return b[:]
-}
-
-// FromBinary returns the SpanContext represented by b.
-//
-// If b has an unsupported version ID or contains no TraceID, FromBinary
-// returns with ok==false.
-func FromBinary(b []byte) (sc trace.SpanContext, ok bool) {
- if len(b) == 0 || b[0] != 0 {
- return trace.SpanContext{}, false
- }
- b = b[1:]
- if len(b) >= 17 && b[0] == 0 {
- copy(sc.TraceID[:], b[1:17])
- b = b[17:]
- } else {
- return trace.SpanContext{}, false
- }
- if len(b) >= 9 && b[0] == 1 {
- copy(sc.SpanID[:], b[1:9])
- b = b[9:]
- }
- if len(b) >= 2 && b[0] == 2 {
- sc.TraceOptions = trace.TraceOptions(b[1])
- }
- return sc, true
-}
-
-// HTTPFormat implementations propagate span contexts
-// in HTTP requests.
-//
-// SpanContextFromRequest extracts a span context from incoming
-// requests.
-//
-// SpanContextToRequest modifies the given request to include the given
-// span context.
-type HTTPFormat interface {
- SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool)
- SpanContextToRequest(sc trace.SpanContext, req *http.Request)
-}
-
-// TODO(jbd): Find a more representative but short name for HTTPFormat.
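Round-tripping a SpanContext through the binary format above, as a message producer and consumer might; it assumes trace.StartSpan and trace.StartSpanWithRemoteParent from trace.go (removed later in this diff).

package main

import (
	"context"
	"fmt"

	"go.opencensus.io/trace"
	"go.opencensus.io/trace/propagation"
)

func main() {
	_, span := trace.StartSpan(context.Background(), "example.com/Send")
	defer span.End()

	// Sender: 29-byte binary header derived from the span context.
	wire := propagation.Binary(span.SpanContext())

	// Receiver: recover the SpanContext and start a remote child span.
	if sc, ok := propagation.FromBinary(wire); ok {
		fmt.Printf("trace=%s span=%s sampled=%v\n", sc.TraceID, sc.SpanID, sc.IsSampled())
		_, child := trace.StartSpanWithRemoteParent(context.Background(), "example.com/Recv", sc)
		child.End()
	}
}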
diff --git a/vendor/go.opencensus.io/trace/sampling.go b/vendor/go.opencensus.io/trace/sampling.go
deleted file mode 100644
index 71c10f9e3..000000000
--- a/vendor/go.opencensus.io/trace/sampling.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "encoding/binary"
-)
-
-const defaultSamplingProbability = 1e-4
-
-// Sampler decides whether a trace should be sampled and exported.
-type Sampler func(SamplingParameters) SamplingDecision
-
-// SamplingParameters contains the values passed to a Sampler.
-type SamplingParameters struct {
- ParentContext SpanContext
- TraceID TraceID
- SpanID SpanID
- Name string
- HasRemoteParent bool
-}
-
-// SamplingDecision is the value returned by a Sampler.
-type SamplingDecision struct {
- Sample bool
-}
-
-// ProbabilitySampler returns a Sampler that samples a given fraction of traces.
-//
-// It also samples spans whose parents are sampled.
-func ProbabilitySampler(fraction float64) Sampler {
- if !(fraction >= 0) {
- fraction = 0
- } else if fraction >= 1 {
- return AlwaysSample()
- }
-
- traceIDUpperBound := uint64(fraction * (1 << 63))
- return Sampler(func(p SamplingParameters) SamplingDecision {
- if p.ParentContext.IsSampled() {
- return SamplingDecision{Sample: true}
- }
- x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1
- return SamplingDecision{Sample: x < traceIDUpperBound}
- })
-}
-
-// AlwaysSample returns a Sampler that samples every trace.
-// Be careful about using this sampler in a production application with
-// significant traffic: a new trace will be started and exported for every
-// request.
-func AlwaysSample() Sampler {
- return func(p SamplingParameters) SamplingDecision {
- return SamplingDecision{Sample: true}
- }
-}
-
-// NeverSample returns a Sampler that samples no traces.
-func NeverSample() Sampler {
- return func(p SamplingParameters) SamplingDecision {
- return SamplingDecision{Sample: false}
- }
-}
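Applying the samplers above through trace.ApplyConfig (config.go earlier in this diff). Because Sampler is just a function type, a name-based sampler is easy to write; the health-check prefix below is illustrative.

package main

import (
	"strings"

	"go.opencensus.io/trace"
)

func main() {
	// Sample 1% of traces instead of the 0.01% default.
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(0.01)})

	// Or: always sample health checks and defer to a probability sampler otherwise.
	fallback := trace.ProbabilitySampler(0.01)
	trace.ApplyConfig(trace.Config{
		DefaultSampler: func(p trace.SamplingParameters) trace.SamplingDecision {
			if strings.HasPrefix(p.Name, "example.com/HealthCheck") {
				return trace.SamplingDecision{Sample: true}
			}
			return fallback(p)
		},
	})
}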
diff --git a/vendor/go.opencensus.io/trace/spanbucket.go b/vendor/go.opencensus.io/trace/spanbucket.go
deleted file mode 100644
index fbabad34c..000000000
--- a/vendor/go.opencensus.io/trace/spanbucket.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "time"
-)
-
-// samplePeriod is the minimum time between accepting spans in a single bucket.
-const samplePeriod = time.Second
-
-// defaultLatencies contains the default latency bucket bounds.
-// TODO: consider defaults, make configurable
-var defaultLatencies = [...]time.Duration{
- 10 * time.Microsecond,
- 100 * time.Microsecond,
- time.Millisecond,
- 10 * time.Millisecond,
- 100 * time.Millisecond,
- time.Second,
- 10 * time.Second,
- time.Minute,
-}
-
-// bucket is a container for a set of spans for a particular error code or latency range.
-type bucket struct {
- nextTime time.Time // next time we can accept a span
- buffer []*SpanData // circular buffer of spans
- nextIndex int // location next SpanData should be placed in buffer
- overflow bool // whether the circular buffer has wrapped around
-}
-
-func makeBucket(bufferSize int) bucket {
- return bucket{
- buffer: make([]*SpanData, bufferSize),
- }
-}
-
-// add adds a span to the bucket, if nextTime has been reached.
-func (b *bucket) add(s *SpanData) {
- if s.EndTime.Before(b.nextTime) {
- return
- }
- if len(b.buffer) == 0 {
- return
- }
- b.nextTime = s.EndTime.Add(samplePeriod)
- b.buffer[b.nextIndex] = s
- b.nextIndex++
- if b.nextIndex == len(b.buffer) {
- b.nextIndex = 0
- b.overflow = true
- }
-}
-
-// size returns the number of spans in the bucket.
-func (b *bucket) size() int {
- if b.overflow {
- return len(b.buffer)
- }
- return b.nextIndex
-}
-
-// span returns the ith span in the bucket.
-func (b *bucket) span(i int) *SpanData {
- if !b.overflow {
- return b.buffer[i]
- }
- if i < len(b.buffer)-b.nextIndex {
- return b.buffer[b.nextIndex+i]
- }
- return b.buffer[b.nextIndex+i-len(b.buffer)]
-}
-
-// resize changes the size of the bucket to n, keeping up to n existing spans.
-func (b *bucket) resize(n int) {
- cur := b.size()
- newBuffer := make([]*SpanData, n)
- if cur < n {
- for i := 0; i < cur; i++ {
- newBuffer[i] = b.span(i)
- }
- b.buffer = newBuffer
- b.nextIndex = cur
- b.overflow = false
- return
- }
- for i := 0; i < n; i++ {
- newBuffer[i] = b.span(i + cur - n)
- }
- b.buffer = newBuffer
- b.nextIndex = 0
- b.overflow = true
-}
-
-// latencyBucket returns the appropriate bucket number for a given latency.
-func latencyBucket(latency time.Duration) int {
- i := 0
- for i < len(defaultLatencies) && latency >= defaultLatencies[i] {
- i++
- }
- return i
-}
-
-// latencyBucketBounds returns the lower and upper bounds for a latency bucket
-// number.
-//
-// The lower bound is inclusive, the upper bound is exclusive (except for the
-// last bucket.)
-func latencyBucketBounds(index int) (lower time.Duration, upper time.Duration) {
- if index == 0 {
- return 0, defaultLatencies[index]
- }
- if index == len(defaultLatencies) {
- return defaultLatencies[index-1], 1<<63 - 1
- }
- return defaultLatencies[index-1], defaultLatencies[index]
-}
diff --git a/vendor/go.opencensus.io/trace/spanstore.go b/vendor/go.opencensus.io/trace/spanstore.go
deleted file mode 100644
index c442d9902..000000000
--- a/vendor/go.opencensus.io/trace/spanstore.go
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "sync"
- "time"
-
- "go.opencensus.io/internal"
-)
-
-const (
- maxBucketSize = 100000
- defaultBucketSize = 10
-)
-
-var (
- ssmu sync.RWMutex // protects spanStores
- spanStores = make(map[string]*spanStore)
-)
-
-// This exists purely to avoid exposing internal methods used by z-Pages externally.
-type internalOnly struct{}
-
-func init() {
- //TODO(#412): remove
- internal.Trace = &internalOnly{}
-}
-
-// ReportActiveSpans returns the active spans for the given name.
-func (i internalOnly) ReportActiveSpans(name string) []*SpanData {
- s := spanStoreForName(name)
- if s == nil {
- return nil
- }
- var out []*SpanData
- s.mu.Lock()
- defer s.mu.Unlock()
- for span := range s.active {
- out = append(out, span.makeSpanData())
- }
- return out
-}
-
-// ReportSpansByError returns a sample of error spans.
-//
-// If code is nonzero, only spans with that status code are returned.
-func (i internalOnly) ReportSpansByError(name string, code int32) []*SpanData {
- s := spanStoreForName(name)
- if s == nil {
- return nil
- }
- var out []*SpanData
- s.mu.Lock()
- defer s.mu.Unlock()
- if code != 0 {
- if b, ok := s.errors[code]; ok {
- for _, sd := range b.buffer {
- if sd == nil {
- break
- }
- out = append(out, sd)
- }
- }
- } else {
- for _, b := range s.errors {
- for _, sd := range b.buffer {
- if sd == nil {
- break
- }
- out = append(out, sd)
- }
- }
- }
- return out
-}
-
-// ConfigureBucketSizes sets the number of spans to keep per latency and error
-// bucket for different span names.
-func (i internalOnly) ConfigureBucketSizes(bcs []internal.BucketConfiguration) {
- for _, bc := range bcs {
- latencyBucketSize := bc.MaxRequestsSucceeded
- if latencyBucketSize < 0 {
- latencyBucketSize = 0
- }
- if latencyBucketSize > maxBucketSize {
- latencyBucketSize = maxBucketSize
- }
- errorBucketSize := bc.MaxRequestsErrors
- if errorBucketSize < 0 {
- errorBucketSize = 0
- }
- if errorBucketSize > maxBucketSize {
- errorBucketSize = maxBucketSize
- }
- spanStoreSetSize(bc.Name, latencyBucketSize, errorBucketSize)
- }
-}
-
-// ReportSpansPerMethod returns a summary of what spans are being stored for each span name.
-func (i internalOnly) ReportSpansPerMethod() map[string]internal.PerMethodSummary {
- out := make(map[string]internal.PerMethodSummary)
- ssmu.RLock()
- defer ssmu.RUnlock()
- for name, s := range spanStores {
- s.mu.Lock()
- p := internal.PerMethodSummary{
- Active: len(s.active),
- }
- for code, b := range s.errors {
- p.ErrorBuckets = append(p.ErrorBuckets, internal.ErrorBucketSummary{
- ErrorCode: code,
- Size: b.size(),
- })
- }
- for i, b := range s.latency {
- min, max := latencyBucketBounds(i)
- p.LatencyBuckets = append(p.LatencyBuckets, internal.LatencyBucketSummary{
- MinLatency: min,
- MaxLatency: max,
- Size: b.size(),
- })
- }
- s.mu.Unlock()
- out[name] = p
- }
- return out
-}
-
-// ReportSpansByLatency returns a sample of successful spans.
-//
-// minLatency is the minimum latency of spans to be returned.
-// maxLatency, if nonzero, is the maximum latency of spans to be returned.
-func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*SpanData {
- s := spanStoreForName(name)
- if s == nil {
- return nil
- }
- var out []*SpanData
- s.mu.Lock()
- defer s.mu.Unlock()
- for i, b := range s.latency {
- min, max := latencyBucketBounds(i)
- if i+1 != len(s.latency) && max <= minLatency {
- continue
- }
- if maxLatency != 0 && maxLatency < min {
- continue
- }
- for _, sd := range b.buffer {
- if sd == nil {
- break
- }
- if minLatency != 0 || maxLatency != 0 {
- d := sd.EndTime.Sub(sd.StartTime)
- if d < minLatency {
- continue
- }
- if maxLatency != 0 && d > maxLatency {
- continue
- }
- }
- out = append(out, sd)
- }
- }
- return out
-}
-
-// spanStore keeps track of spans stored for a particular span name.
-//
-// It contains all active spans; a sample of spans for failed requests,
-// categorized by error code; and a sample of spans for successful requests,
-// bucketed by latency.
-type spanStore struct {
- mu sync.Mutex // protects everything below.
- active map[*Span]struct{}
- errors map[int32]*bucket
- latency []bucket
- maxSpansPerErrorBucket int
-}
-
-// newSpanStore creates a span store.
-func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore {
- s := &spanStore{
- active: make(map[*Span]struct{}),
- latency: make([]bucket, len(defaultLatencies)+1),
- maxSpansPerErrorBucket: errorBucketSize,
- }
- for i := range s.latency {
- s.latency[i] = makeBucket(latencyBucketSize)
- }
- return s
-}
-
-// spanStoreForName returns the spanStore for the given name.
-//
-// It returns nil if it doesn't exist.
-func spanStoreForName(name string) *spanStore {
- var s *spanStore
- ssmu.RLock()
- s, _ = spanStores[name]
- ssmu.RUnlock()
- return s
-}
-
-// spanStoreForNameCreateIfNew returns the spanStore for the given name.
-//
-// It creates it if it didn't exist.
-func spanStoreForNameCreateIfNew(name string) *spanStore {
- ssmu.RLock()
- s, ok := spanStores[name]
- ssmu.RUnlock()
- if ok {
- return s
- }
- ssmu.Lock()
- defer ssmu.Unlock()
- s, ok = spanStores[name]
- if ok {
- return s
- }
- s = newSpanStore(name, defaultBucketSize, defaultBucketSize)
- spanStores[name] = s
- return s
-}
-
-// spanStoreSetSize resizes the spanStore for the given name.
-//
-// It creates it if it didn't exist.
-func spanStoreSetSize(name string, latencyBucketSize int, errorBucketSize int) {
- ssmu.RLock()
- s, ok := spanStores[name]
- ssmu.RUnlock()
- if ok {
- s.resize(latencyBucketSize, errorBucketSize)
- return
- }
- ssmu.Lock()
- defer ssmu.Unlock()
- s, ok = spanStores[name]
- if ok {
- s.resize(latencyBucketSize, errorBucketSize)
- return
- }
- s = newSpanStore(name, latencyBucketSize, errorBucketSize)
- spanStores[name] = s
-}
-
-func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) {
- s.mu.Lock()
- for i := range s.latency {
- s.latency[i].resize(latencyBucketSize)
- }
- for _, b := range s.errors {
- b.resize(errorBucketSize)
- }
- s.maxSpansPerErrorBucket = errorBucketSize
- s.mu.Unlock()
-}
-
-// add adds a span to the active bucket of the spanStore.
-func (s *spanStore) add(span *Span) {
- s.mu.Lock()
- s.active[span] = struct{}{}
- s.mu.Unlock()
-}
-
-// finished removes a span from the active set, and adds a corresponding
-// SpanData to a latency or error bucket.
-func (s *spanStore) finished(span *Span, sd *SpanData) {
- latency := sd.EndTime.Sub(sd.StartTime)
- if latency < 0 {
- latency = 0
- }
- code := sd.Status.Code
-
- s.mu.Lock()
- delete(s.active, span)
- if code == 0 {
- s.latency[latencyBucket(latency)].add(sd)
- } else {
- if s.errors == nil {
- s.errors = make(map[int32]*bucket)
- }
- if b := s.errors[code]; b != nil {
- b.add(sd)
- } else {
- b := makeBucket(s.maxSpansPerErrorBucket)
- s.errors[code] = &b
- b.add(sd)
- }
- }
- s.mu.Unlock()
-}
diff --git a/vendor/go.opencensus.io/trace/status_codes.go b/vendor/go.opencensus.io/trace/status_codes.go
deleted file mode 100644
index ec60effd1..000000000
--- a/vendor/go.opencensus.io/trace/status_codes.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-// Status codes for use with Span.SetStatus. These correspond to the status
-// codes used by gRPC defined here: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
-const (
- StatusCodeOK = 0
- StatusCodeCancelled = 1
- StatusCodeUnknown = 2
- StatusCodeInvalidArgument = 3
- StatusCodeDeadlineExceeded = 4
- StatusCodeNotFound = 5
- StatusCodeAlreadyExists = 6
- StatusCodePermissionDenied = 7
- StatusCodeResourceExhausted = 8
- StatusCodeFailedPrecondition = 9
- StatusCodeAborted = 10
- StatusCodeOutOfRange = 11
- StatusCodeUnimplemented = 12
- StatusCodeInternal = 13
- StatusCodeUnavailable = 14
- StatusCodeDataLoss = 15
- StatusCodeUnauthenticated = 16
-)
diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go
deleted file mode 100644
index 77578a3c5..000000000
--- a/vendor/go.opencensus.io/trace/trace.go
+++ /dev/null
@@ -1,513 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "context"
- crand "crypto/rand"
- "encoding/binary"
- "fmt"
- "math/rand"
- "sync"
- "sync/atomic"
- "time"
-
- "go.opencensus.io/internal"
- "go.opencensus.io/trace/tracestate"
-)
-
-// Span represents a span of a trace. It has an associated SpanContext, and
-// stores data accumulated while the span is active.
-//
-// Ideally users should interact with Spans by calling the functions in this
-// package that take a Context parameter.
-type Span struct {
- // data contains information recorded about the span.
- //
- // It will be non-nil if we are exporting the span or recording events for it.
- // Otherwise, data is nil, and the Span is simply a carrier for the
- // SpanContext, so that the trace ID is propagated.
- data *SpanData
- mu sync.Mutex // protects the contents of *data (but not the pointer value.)
- spanContext SpanContext
- // spanStore is the spanStore this span belongs to, if any, otherwise it is nil.
- *spanStore
- endOnce sync.Once
-
- executionTracerTaskEnd func() // ends the execution tracer span
-}
-
-// IsRecordingEvents returns true if events are being recorded for this span.
-// Use this check to avoid computing expensive annotations when they will never
-// be used.
-func (s *Span) IsRecordingEvents() bool {
- if s == nil {
- return false
- }
- return s.data != nil
-}
-
-// TraceOptions contains options associated with a trace span.
-type TraceOptions uint32
-
-// IsSampled returns true if the span will be exported.
-func (sc SpanContext) IsSampled() bool {
- return sc.TraceOptions.IsSampled()
-}
-
-// setIsSampled sets the TraceOptions bit that determines whether the span will be exported.
-func (sc *SpanContext) setIsSampled(sampled bool) {
- if sampled {
- sc.TraceOptions |= 1
- } else {
- sc.TraceOptions &= ^TraceOptions(1)
- }
-}
-
-// IsSampled returns true if the span will be exported.
-func (t TraceOptions) IsSampled() bool {
- return t&1 == 1
-}
-
-// SpanContext contains the state that must propagate across process boundaries.
-//
-// SpanContext is not an implementation of context.Context.
-// TODO: add reference to external Census docs for SpanContext.
-type SpanContext struct {
- TraceID TraceID
- SpanID SpanID
- TraceOptions TraceOptions
- Tracestate *tracestate.Tracestate
-}
-
-type contextKey struct{}
-
-// FromContext returns the Span stored in a context, or nil if there isn't one.
-func FromContext(ctx context.Context) *Span {
- s, _ := ctx.Value(contextKey{}).(*Span)
- return s
-}
-
-// NewContext returns a new context with the given Span attached.
-func NewContext(parent context.Context, s *Span) context.Context {
- return context.WithValue(parent, contextKey{}, s)
-}
-
-// All available span kinds. Span kind must be either one of these values.
-const (
- SpanKindUnspecified = iota
- SpanKindServer
- SpanKindClient
-)
-
-// StartOptions contains options concerning how a span is started.
-type StartOptions struct {
- // Sampler to consult for this Span. If provided, it is always consulted.
- //
- // If not provided, then the behavior differs based on whether
- // the parent of this Span is remote, local, or there is no parent.
- // In the case of a remote parent or no parent, the
- // default sampler (see Config) will be consulted. Otherwise,
- // when there is a non-remote parent, no new sampling decision will be made:
- // we will preserve the sampling of the parent.
- Sampler Sampler
-
- // SpanKind represents the kind of a span. If none is set,
- // SpanKindUnspecified is used.
- SpanKind int
-}
-
-// StartOption apply changes to StartOptions.
-type StartOption func(*StartOptions)
-
-// WithSpanKind makes new spans to be created with the given kind.
-func WithSpanKind(spanKind int) StartOption {
- return func(o *StartOptions) {
- o.SpanKind = spanKind
- }
-}
-
-// WithSampler makes new spans to be be created with a custom sampler.
-// Otherwise, the global sampler is used.
-func WithSampler(sampler Sampler) StartOption {
- return func(o *StartOptions) {
- o.Sampler = sampler
- }
-}
-
-// StartSpan starts a new child span of the current span in the context. If
-// there is no span in the context, creates a new trace and span.
-//
-// Returned context contains the newly created span. You can use it to
-// propagate the returned span in process.
-func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) {
- var opts StartOptions
- var parent SpanContext
- if p := FromContext(ctx); p != nil {
- parent = p.spanContext
- }
- for _, op := range o {
- op(&opts)
- }
- span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts)
-
- ctx, end := startExecutionTracerTask(ctx, name)
- span.executionTracerTaskEnd = end
- return NewContext(ctx, span), span
-}
-
-// StartSpanWithRemoteParent starts a new child span of the span from the given parent.
-//
-// If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is
-// preferred for cases where the parent is propagated via an incoming request.
-//
-// Returned context contains the newly created span. You can use it to
-// propagate the returned span in process.
-func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) {
- var opts StartOptions
- for _, op := range o {
- op(&opts)
- }
- span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts)
- ctx, end := startExecutionTracerTask(ctx, name)
- span.executionTracerTaskEnd = end
- return NewContext(ctx, span), span
-}
-
-func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span {
- span := &Span{}
- span.spanContext = parent
-
- cfg := config.Load().(*Config)
-
- if !hasParent {
- span.spanContext.TraceID = cfg.IDGenerator.NewTraceID()
- }
- span.spanContext.SpanID = cfg.IDGenerator.NewSpanID()
- sampler := cfg.DefaultSampler
-
- if !hasParent || remoteParent || o.Sampler != nil {
- // If this span is the child of a local span and no Sampler is set in the
- // options, keep the parent's TraceOptions.
- //
- // Otherwise, consult the Sampler in the options if it is non-nil, otherwise
- // the default sampler.
- if o.Sampler != nil {
- sampler = o.Sampler
- }
- span.spanContext.setIsSampled(sampler(SamplingParameters{
- ParentContext: parent,
- TraceID: span.spanContext.TraceID,
- SpanID: span.spanContext.SpanID,
- Name: name,
- HasRemoteParent: remoteParent}).Sample)
- }
-
- if !internal.LocalSpanStoreEnabled && !span.spanContext.IsSampled() {
- return span
- }
-
- span.data = &SpanData{
- SpanContext: span.spanContext,
- StartTime: time.Now(),
- SpanKind: o.SpanKind,
- Name: name,
- HasRemoteParent: remoteParent,
- }
- if hasParent {
- span.data.ParentSpanID = parent.SpanID
- }
- if internal.LocalSpanStoreEnabled {
- var ss *spanStore
- ss = spanStoreForNameCreateIfNew(name)
- if ss != nil {
- span.spanStore = ss
- ss.add(span)
- }
- }
-
- return span
-}
-
-// End ends the span.
-func (s *Span) End() {
- if !s.IsRecordingEvents() {
- return
- }
- s.endOnce.Do(func() {
- if s.executionTracerTaskEnd != nil {
- s.executionTracerTaskEnd()
- }
- exp, _ := exporters.Load().(exportersMap)
- mustExport := s.spanContext.IsSampled() && len(exp) > 0
- if s.spanStore != nil || mustExport {
- sd := s.makeSpanData()
- sd.EndTime = internal.MonotonicEndTime(sd.StartTime)
- if s.spanStore != nil {
- s.spanStore.finished(s, sd)
- }
- if mustExport {
- for e := range exp {
- e.ExportSpan(sd)
- }
- }
- }
- })
-}
-
-// makeSpanData produces a SpanData representing the current state of the Span.
-// It requires that s.data is non-nil.
-func (s *Span) makeSpanData() *SpanData {
- var sd SpanData
- s.mu.Lock()
- sd = *s.data
- if s.data.Attributes != nil {
- sd.Attributes = make(map[string]interface{})
- for k, v := range s.data.Attributes {
- sd.Attributes[k] = v
- }
- }
- s.mu.Unlock()
- return &sd
-}
-
-// SpanContext returns the SpanContext of the span.
-func (s *Span) SpanContext() SpanContext {
- if s == nil {
- return SpanContext{}
- }
- return s.spanContext
-}
-
-// SetName sets the name of the span, if it is recording events.
-func (s *Span) SetName(name string) {
- if !s.IsRecordingEvents() {
- return
- }
- s.mu.Lock()
- s.data.Name = name
- s.mu.Unlock()
-}
-
-// SetStatus sets the status of the span, if it is recording events.
-func (s *Span) SetStatus(status Status) {
- if !s.IsRecordingEvents() {
- return
- }
- s.mu.Lock()
- s.data.Status = status
- s.mu.Unlock()
-}
-
-// AddAttributes sets attributes in the span.
-//
-// Existing attributes whose keys appear in the attributes parameter are overwritten.
-func (s *Span) AddAttributes(attributes ...Attribute) {
- if !s.IsRecordingEvents() {
- return
- }
- s.mu.Lock()
- if s.data.Attributes == nil {
- s.data.Attributes = make(map[string]interface{})
- }
- copyAttributes(s.data.Attributes, attributes)
- s.mu.Unlock()
-}
-
-// copyAttributes copies a slice of Attributes into a map.
-func copyAttributes(m map[string]interface{}, attributes []Attribute) {
- for _, a := range attributes {
- m[a.key] = a.value
- }
-}
-
-func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...interface{}) {
- now := time.Now()
- msg := fmt.Sprintf(format, a...)
- var m map[string]interface{}
- s.mu.Lock()
- if len(attributes) != 0 {
- m = make(map[string]interface{})
- copyAttributes(m, attributes)
- }
- s.data.Annotations = append(s.data.Annotations, Annotation{
- Time: now,
- Message: msg,
- Attributes: m,
- })
- s.mu.Unlock()
-}
-
-func (s *Span) printStringInternal(attributes []Attribute, str string) {
- now := time.Now()
- var a map[string]interface{}
- s.mu.Lock()
- if len(attributes) != 0 {
- a = make(map[string]interface{})
- copyAttributes(a, attributes)
- }
- s.data.Annotations = append(s.data.Annotations, Annotation{
- Time: now,
- Message: str,
- Attributes: a,
- })
- s.mu.Unlock()
-}
-
-// Annotate adds an annotation with attributes.
-// Attributes can be nil.
-func (s *Span) Annotate(attributes []Attribute, str string) {
- if !s.IsRecordingEvents() {
- return
- }
- s.printStringInternal(attributes, str)
-}
-
-// Annotatef adds an annotation with attributes.
-func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) {
- if !s.IsRecordingEvents() {
- return
- }
- s.lazyPrintfInternal(attributes, format, a...)
-}
-
-// AddMessageSendEvent adds a message send event to the span.
-//
-// messageID is an identifier for the message, which is recommended to be
-// unique in this span and the same between the send event and the receive
-// event (this allows to identify a message between the sender and receiver).
-// For example, this could be a sequence id.
-func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
- if !s.IsRecordingEvents() {
- return
- }
- now := time.Now()
- s.mu.Lock()
- s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{
- Time: now,
- EventType: MessageEventTypeSent,
- MessageID: messageID,
- UncompressedByteSize: uncompressedByteSize,
- CompressedByteSize: compressedByteSize,
- })
- s.mu.Unlock()
-}
-
-// AddMessageReceiveEvent adds a message receive event to the span.
-//
-// messageID is an identifier for the message, which is recommended to be
-// unique in this span and the same between the send event and the receive
-// event (this allows to identify a message between the sender and receiver).
-// For example, this could be a sequence id.
-func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
- if !s.IsRecordingEvents() {
- return
- }
- now := time.Now()
- s.mu.Lock()
- s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{
- Time: now,
- EventType: MessageEventTypeRecv,
- MessageID: messageID,
- UncompressedByteSize: uncompressedByteSize,
- CompressedByteSize: compressedByteSize,
- })
- s.mu.Unlock()
-}
-
-// AddLink adds a link to the span.
-func (s *Span) AddLink(l Link) {
- if !s.IsRecordingEvents() {
- return
- }
- s.mu.Lock()
- s.data.Links = append(s.data.Links, l)
- s.mu.Unlock()
-}
-
-func (s *Span) String() string {
- if s == nil {
- return ""
- }
- if s.data == nil {
- return fmt.Sprintf("span %s", s.spanContext.SpanID)
- }
- s.mu.Lock()
- str := fmt.Sprintf("span %s %q", s.spanContext.SpanID, s.data.Name)
- s.mu.Unlock()
- return str
-}
-
-var config atomic.Value // access atomically
-
-func init() {
- gen := &defaultIDGenerator{}
- // initialize traceID and spanID generators.
- var rngSeed int64
- for _, p := range []interface{}{
- &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc,
- } {
- binary.Read(crand.Reader, binary.LittleEndian, p)
- }
- gen.traceIDRand = rand.New(rand.NewSource(rngSeed))
- gen.spanIDInc |= 1
-
- config.Store(&Config{
- DefaultSampler: ProbabilitySampler(defaultSamplingProbability),
- IDGenerator: gen,
- })
-}
-
-type defaultIDGenerator struct {
- sync.Mutex
-
- // Please keep these as the first fields
- // so that these 8 byte fields will be aligned on addresses
- // divisible by 8, on both 32-bit and 64-bit machines when
- // performing atomic increments and accesses.
- // See:
- // * https://github.com/census-instrumentation/opencensus-go/issues/587
- // * https://github.com/census-instrumentation/opencensus-go/issues/865
- // * https://golang.org/pkg/sync/atomic/#pkg-note-BUG
- nextSpanID uint64
- spanIDInc uint64
-
- traceIDAdd [2]uint64
- traceIDRand *rand.Rand
-}
-
-// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
-func (gen *defaultIDGenerator) NewSpanID() [8]byte {
- var id uint64
- for id == 0 {
- id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc)
- }
- var sid [8]byte
- binary.LittleEndian.PutUint64(sid[:], id)
- return sid
-}
-
-// NewTraceID returns a non-zero trace ID from a randomly-chosen sequence.
-// mu should be held while this function is called.
-func (gen *defaultIDGenerator) NewTraceID() [16]byte {
- var tid [16]byte
- // Construct the trace ID from two outputs of traceIDRand, with a constant
- // added to each half for additional entropy.
- gen.Lock()
- binary.LittleEndian.PutUint64(tid[0:8], gen.traceIDRand.Uint64()+gen.traceIDAdd[0])
- binary.LittleEndian.PutUint64(tid[8:16], gen.traceIDRand.Uint64()+gen.traceIDAdd[1])
- gen.Unlock()
- return tid
-}
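
For context on what this vendored package provided, here is a hedged usage sketch of the API defined in the deleted trace.go (StartSpan, End, AddAttributes, Annotate, SetStatus); `StringAttribute`, `AlwaysSample`, and `Status` come from sibling files of the same package and are assumed to match this version:

```go
package main

import (
	"context"
	"log"

	"go.opencensus.io/trace"
)

func main() {
	// Start a root span; the returned context carries it, so work started
	// from ctx becomes child spans.
	ctx, span := trace.StartSpan(context.Background(), "example/Fetch",
		trace.WithSampler(trace.AlwaysSample()))
	defer span.End()

	span.AddAttributes(trace.StringAttribute("resource", "users/42"))
	span.Annotate(nil, "lookup started")

	if err := fetch(ctx); err != nil {
		span.SetStatus(trace.Status{Code: trace.StatusCodeNotFound, Message: err.Error()})
		log.Println(err)
	}
}

func fetch(ctx context.Context) error {
	_, child := trace.StartSpan(ctx, "example/Fetch/db")
	defer child.End()
	return nil
}
```
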
diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go
deleted file mode 100644
index b7d8aaf28..000000000
--- a/vendor/go.opencensus.io/trace/trace_go11.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build go1.11
-
-package trace
-
-import (
- "context"
- t "runtime/trace"
-)
-
-func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) {
- if !t.IsEnabled() {
- // Avoid additional overhead if
- // runtime/trace is not enabled.
- return ctx, func() {}
- }
- nctx, task := t.NewTask(ctx, name)
- return nctx, task.End
-}
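
The build-tagged file above only bridges spans to `runtime/trace` tasks on Go 1.11+. The same standard-library facility can be used directly; a minimal sketch:

```go
package main

import (
	"context"
	"os"
	"runtime/trace"
)

func main() {
	// Write an execution trace that `go tool trace` can open; tasks created
	// below show up as user-defined units of work.
	f, err := os.Create("trace.out")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := trace.Start(f); err != nil {
		panic(err)
	}
	defer trace.Stop()

	ctx, task := trace.NewTask(context.Background(), "example/Fetch")
	defer task.End()

	trace.WithRegion(ctx, "parse", func() {
		// work attributed to the "parse" region of the task
	})
}
```
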
diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go
deleted file mode 100644
index e25419859..000000000
--- a/vendor/go.opencensus.io/trace/trace_nongo11.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !go1.11
-
-package trace
-
-import (
- "context"
-)
-
-func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) {
- return ctx, func() {}
-}
diff --git a/vendor/go.opencensus.io/trace/tracestate/tracestate.go b/vendor/go.opencensus.io/trace/tracestate/tracestate.go
deleted file mode 100644
index 2345dd379..000000000
--- a/vendor/go.opencensus.io/trace/tracestate/tracestate.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tracestate
-
-import (
- "fmt"
- "regexp"
-)
-
-const (
- keyMaxSize = 256
- valueMaxSize = 256
- maxKeyValuePairs = 32
-)
-
-const (
- keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}`
- keyWithVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}`
- keyFormat = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)`
- valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]`
-)
-
-var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`)
-var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`)
-
-// Tracestate represents tracing-system specific context in a list of key-value pairs. Tracestate allows different
-// vendors propagate additional information and inter-operate with their legacy Id formats.
-type Tracestate struct {
- entries []Entry
-}
-
-// Entry represents one key-value pair in a list of key-value pair of Tracestate.
-type Entry struct {
- // Key is an opaque string up to 256 characters printable. It MUST begin with a lowercase letter,
- // and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and
- // forward slashes /.
- Key string
-
- // Value is an opaque string up to 256 characters printable ASCII RFC0020 characters (i.e., the
- // range 0x20 to 0x7E) except comma , and =.
- Value string
-}
-
-// Entries returns a slice of Entry.
-func (ts *Tracestate) Entries() []Entry {
- if ts == nil {
- return nil
- }
- return ts.entries
-}
-
-func (ts *Tracestate) remove(key string) *Entry {
- for index, entry := range ts.entries {
- if entry.Key == key {
- ts.entries = append(ts.entries[:index], ts.entries[index+1:]...)
- return &entry
- }
- }
- return nil
-}
-
-func (ts *Tracestate) add(entries []Entry) error {
- for _, entry := range entries {
- ts.remove(entry.Key)
- }
- if len(ts.entries)+len(entries) > maxKeyValuePairs {
- return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d",
- len(entries), len(ts.entries), maxKeyValuePairs)
- }
- ts.entries = append(entries, ts.entries...)
- return nil
-}
-
-func isValid(entry Entry) bool {
- return keyValidationRegExp.MatchString(entry.Key) &&
- valueValidationRegExp.MatchString(entry.Value)
-}
-
-func containsDuplicateKey(entries ...Entry) (string, bool) {
- keyMap := make(map[string]int)
- for _, entry := range entries {
- if _, ok := keyMap[entry.Key]; ok {
- return entry.Key, true
- }
- keyMap[entry.Key] = 1
- }
- return "", false
-}
-
-func areEntriesValid(entries ...Entry) (*Entry, bool) {
- for _, entry := range entries {
- if !isValid(entry) {
- return &entry, false
- }
- }
- return nil, true
-}
-
-// New creates a Tracestate object from a parent and/or entries (key-value pair).
-// Entries from the parent are copied if present. The entries passed to this function
-// are inserted in front of those copied from the parent. If an entry copied from the
-// parent contains the same key as one of the entry in entries then the entry copied
-// from the parent is removed. See add func.
-//
-// An error is returned with nil Tracestate if
-// 1. one or more entry in entries is invalid.
-// 2. two or more entries in the input entries have the same key.
-// 3. the number of entries combined from the parent and the input entries exceeds maxKeyValuePairs.
-// (duplicate entry is counted only once).
-func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) {
- if parent == nil && len(entries) == 0 {
- return nil, nil
- }
- if entry, ok := areEntriesValid(entries...); !ok {
- return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value)
- }
-
- if key, duplicate := containsDuplicateKey(entries...); duplicate {
- return nil, fmt.Errorf("contains duplicate keys (%s)", key)
- }
-
- tracestate := Tracestate{}
-
- if parent != nil && len(parent.entries) > 0 {
- tracestate.entries = append([]Entry{}, parent.entries...)
- }
-
- err := tracestate.add(entries)
- if err != nil {
- return nil, err
- }
- return &tracestate, nil
-}
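
The removed tracestate package validates W3C-style key/value pairs against the regexps above and caps the list at 32 entries, with new entries inserted in front of those inherited from a parent. A short usage sketch against exactly the API shown in the deleted file:

```go
package main

import (
	"fmt"

	"go.opencensus.io/trace/tracestate"
)

func main() {
	// Keys must be lowercase and values printable ASCII without ',' or '='
	// per the validation regexps above.
	parent, err := tracestate.New(nil, tracestate.Entry{Key: "congo", Value: "t61rcWkgMzE"})
	if err != nil {
		panic(err)
	}

	// New entries are inserted in front; a duplicate key inherited from the
	// parent would be dropped.
	ts, err := tracestate.New(parent, tracestate.Entry{Key: "rojo", Value: "00f067aa0ba902b7"})
	if err != nil {
		panic(err)
	}
	for _, e := range ts.Entries() {
		fmt.Printf("%s=%s\n", e.Key, e.Value)
	}
}
```
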
diff --git a/vendor/golang.org/x/crypto/openpgp/keys.go b/vendor/golang.org/x/crypto/openpgp/keys.go
index a79a8c13a..efe6e7302 100644
--- a/vendor/golang.org/x/crypto/openpgp/keys.go
+++ b/vendor/golang.org/x/crypto/openpgp/keys.go
@@ -333,7 +333,6 @@ func ReadEntity(packets *packet.Reader) (*Entity, error) {
return nil, errors.StructuralError("primary key cannot be used for signatures")
}
- var current *Identity
var revocations []*packet.Signature
EachPacket:
for {
@@ -349,7 +348,7 @@ EachPacket:
// Make a new Identity object, that we might wind up throwing away.
// We'll only add it if we get a valid self-signature over this
// userID.
- current = new(Identity)
+ current := new(Identity)
current.Name = pkt.Id
current.UserId = pkt
@@ -384,11 +383,9 @@ EachPacket:
// TODO: RFC4880 5.2.1 permits signatures
// directly on keys (eg. to bind additional
// revocation keys).
- } else if current == nil {
- return nil, errors.StructuralError("signature packet found before user id packet")
- } else {
- current.Signatures = append(current.Signatures, pkt)
}
+ // Else, ignoring the signature as it does not follow anything
+ // we would know to attach it to.
case *packet.PrivateKey:
if pkt.IsSubkey == false {
packets.Unread(p)
@@ -433,26 +430,45 @@ func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *p
var subKey Subkey
subKey.PublicKey = pub
subKey.PrivateKey = priv
- p, err := packets.Next()
- if err == io.EOF {
- return io.ErrUnexpectedEOF
+
+ for {
+ p, err := packets.Next()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return errors.StructuralError("subkey signature invalid: " + err.Error())
+ }
+
+ sig, ok := p.(*packet.Signature)
+ if !ok {
+ packets.Unread(p)
+ break
+ }
+
+ if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation {
+ return errors.StructuralError("subkey signature with wrong type")
+ }
+
+ if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil {
+ return errors.StructuralError("subkey signature invalid: " + err.Error())
+ }
+
+ switch sig.SigType {
+ case packet.SigTypeSubkeyRevocation:
+ subKey.Sig = sig
+ case packet.SigTypeSubkeyBinding:
+ if subKey.Sig == nil {
+ subKey.Sig = sig
+ }
+ }
}
- if err != nil {
- return errors.StructuralError("subkey signature invalid: " + err.Error())
- }
- var ok bool
- subKey.Sig, ok = p.(*packet.Signature)
- if !ok {
+
+ if subKey.Sig == nil {
return errors.StructuralError("subkey packet not followed by signature")
}
- if subKey.Sig.SigType != packet.SigTypeSubkeyBinding && subKey.Sig.SigType != packet.SigTypeSubkeyRevocation {
- return errors.StructuralError("subkey signature with wrong type")
- }
- err = e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, subKey.Sig)
- if err != nil {
- return errors.StructuralError("subkey signature invalid: " + err.Error())
- }
+
e.Subkeys = append(e.Subkeys, subKey)
+
return nil
}
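
The rewritten addSubkey loop now consumes every signature that follows a subkey packet, keeping a revocation signature in preference to the first binding signature. A hedged sketch of inspecting the result through the public API (`ReadKeyRing`, `Subkeys`, and `PublicKey.KeyId` are assumed from the rest of the openpgp package; the keyring path is hypothetical):

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	f, err := os.Open("pubring.gpg") // hypothetical keyring path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	entities, err := openpgp.ReadKeyRing(f)
	if err != nil {
		panic(err)
	}
	for _, e := range entities {
		for _, sk := range e.Subkeys {
			// After the change above, sk.Sig is a revocation when one was
			// present, otherwise the first binding signature.
			revoked := sk.Sig.SigType == packet.SigTypeSubkeyRevocation
			fmt.Printf("subkey %X revoked=%v\n", sk.PublicKey.KeyId, revoked)
		}
	}
}
```
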
diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go
index 34d95822f..2261dc386 100644
--- a/vendor/golang.org/x/crypto/ssh/keys.go
+++ b/vendor/golang.org/x/crypto/ssh/keys.go
@@ -903,8 +903,8 @@ func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) {
// Implemented based on the documentation at
// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key
func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) {
- magic := append([]byte("openssh-key-v1"), 0)
- if !bytes.Equal(magic, key[0:len(magic)]) {
+ const magic = "openssh-key-v1\x00"
+ if len(key) < len(magic) || string(key[:len(magic)]) != magic {
return nil, errors.New("ssh: invalid openssh private key format")
}
remaining := key[len(magic):]
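
The new length check matters because the old `key[0:len(magic)]` slice expression panics when the input is shorter than the magic prefix. `bytes.HasPrefix` performs the same length-safe comparison; a small standalone sketch:

```go
package main

import (
	"bytes"
	"fmt"
)

func hasOpenSSHMagic(key []byte) bool {
	// Length-safe: HasPrefix returns false instead of panicking when key is
	// shorter than the magic prefix, like the len(key) < len(magic) guard above.
	magic := []byte("openssh-key-v1\x00")
	return bytes.HasPrefix(key, magic)
}

func main() {
	fmt.Println(hasOpenSSHMagic([]byte("openssh-key-v1\x00rest...")))
	fmt.Println(hasOpenSSHMagic([]byte("short"))) // no panic on truncated input
}
```
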
diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go
index 5eb7c5a8f..a3a918f0b 100644
--- a/vendor/golang.org/x/net/html/const.go
+++ b/vendor/golang.org/x/net/html/const.go
@@ -97,8 +97,16 @@ func isSpecialElement(element *Node) bool {
switch element.Namespace {
case "", "html":
return isSpecialElementMap[element.Data]
+ case "math":
+ switch element.Data {
+ case "mi", "mo", "mn", "ms", "mtext", "annotation-xml":
+ return true
+ }
case "svg":
- return element.Data == "foreignObject"
+ switch element.Data {
+ case "foreignObject", "desc", "title":
+ return true
+ }
}
return false
}
diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go
index 091fb0d13..9889c09be 100644
--- a/vendor/golang.org/x/net/html/parse.go
+++ b/vendor/golang.org/x/net/html/parse.go
@@ -470,6 +470,10 @@ func (p *parser) resetInsertionMode() {
case a.Table:
p.im = inTableIM
case a.Template:
+ // TODO: remove this divergence from the HTML5 spec.
+ if n.Namespace != "" {
+ continue
+ }
p.im = p.templateStack.top()
case a.Head:
// TODO: remove this divergence from the HTML5 spec.
@@ -984,6 +988,14 @@ func inBodyIM(p *parser) bool {
p.acknowledgeSelfClosingTag()
p.popUntil(buttonScope, a.P)
p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
+ if p.form == nil {
+ // NOTE: The 'isindex' element has been removed,
+ // and the 'template' element has not been designed to be
+ // collaborative with the index element.
+ //
+ // Ignore the token.
+ return true
+ }
if action != "" {
p.form.Attr = []Attribute{{Key: "action", Val: action}}
}
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md
index dfbed62cf..46aa2b12d 100644
--- a/vendor/golang.org/x/oauth2/CONTRIBUTING.md
+++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md
@@ -4,15 +4,16 @@ Go is an open source project.
It is the work of hundreds of contributors. We appreciate your help!
+
## Filing issues
When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:
-1. What version of Go are you using (`go version`)?
-2. What operating system and processor architecture are you using?
-3. What did you do?
-4. What did you expect to see?
-5. What did you see instead?
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
The gophers there will answer or ask you to file an issue if you've tripped over a bug.
@@ -22,5 +23,9 @@ The gophers there will answer or ask you to file an issue if you've tripped over
Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
before sending patches.
+**We do not accept GitHub pull requests**
+(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
+
Unless otherwise noted, the Go source files are distributed under
the BSD-style license found in the LICENSE file.
+
diff --git a/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/golang.org/x/oauth2/client_appengine.go
new file mode 100644
index 000000000..8962c49d1
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/client_appengine.go
@@ -0,0 +1,25 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+// App Engine hooks.
+
+package oauth2
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2/internal"
+ "google.golang.org/appengine/urlfetch"
+)
+
+func init() {
+ internal.RegisterContextClientFunc(contextClientAppEngine)
+}
+
+func contextClientAppEngine(ctx context.Context) (*http.Client, error) {
+ return urlfetch.Client(ctx), nil
+}
diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go
index a31607437..b4b62745c 100644
--- a/vendor/golang.org/x/oauth2/google/default.go
+++ b/vendor/golang.org/x/oauth2/google/default.go
@@ -18,6 +18,20 @@ import (
"golang.org/x/oauth2"
)
+// DefaultCredentials holds "Application Default Credentials".
+// For more details, see:
+// https://developers.google.com/accounts/docs/application-default-credentials
+type DefaultCredentials struct {
+ ProjectID string // may be empty
+ TokenSource oauth2.TokenSource
+
+ // JSON contains the raw bytes from a JSON credentials file.
+ // This field may be nil if authentication is provided by the
+ // environment and not with a credentials file, e.g. when code is
+ // running on Google Cloud Platform.
+ JSON []byte
+}
+
// DefaultClient returns an HTTP Client that uses the
// DefaultTokenSource to obtain authentication credentials.
func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
@@ -39,12 +53,25 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc
return creds.TokenSource, nil
}
-// Common implementation for FindDefaultCredentials.
-func findDefaultCredentials(ctx context.Context, scopes []string) (*DefaultCredentials, error) {
+// FindDefaultCredentials searches for "Application Default Credentials".
+//
+// It looks for credentials in the following places,
+// preferring the first location found:
+//
+// 1. A JSON file whose path is specified by the
+// GOOGLE_APPLICATION_CREDENTIALS environment variable.
+// 2. A JSON file in a location known to the gcloud command-line tool.
+// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
+// On other systems, $HOME/.config/gcloud/application_default_credentials.json.
+// 3. On Google App Engine it uses the appengine.AccessToken function.
+// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches
+// credentials from the metadata server.
+// (In this final case any provided scopes are ignored.)
+func FindDefaultCredentials(ctx context.Context, scope ...string) (*DefaultCredentials, error) {
// First, try the environment variable.
const envVar = "GOOGLE_APPLICATION_CREDENTIALS"
if filename := os.Getenv(envVar); filename != "" {
- creds, err := readCredentialsFile(ctx, filename, scopes)
+ creds, err := readCredentialsFile(ctx, filename, scope)
if err != nil {
return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err)
}
@@ -53,7 +80,7 @@ func findDefaultCredentials(ctx context.Context, scopes []string) (*DefaultCrede
// Second, try a well-known file.
filename := wellKnownFile()
- if creds, err := readCredentialsFile(ctx, filename, scopes); err == nil {
+ if creds, err := readCredentialsFile(ctx, filename, scope); err == nil {
return creds, nil
} else if !os.IsNotExist(err) {
return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err)
@@ -63,7 +90,7 @@ func findDefaultCredentials(ctx context.Context, scopes []string) (*DefaultCrede
if appengineTokenFunc != nil && !appengineFlex {
return &DefaultCredentials{
ProjectID: appengineAppIDFunc(ctx),
- TokenSource: AppEngineTokenSource(ctx, scopes...),
+ TokenSource: AppEngineTokenSource(ctx, scope...),
}, nil
}
@@ -81,23 +108,6 @@ func findDefaultCredentials(ctx context.Context, scopes []string) (*DefaultCrede
return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url)
}
-// Common implementation for CredentialsFromJSON.
-func credentialsFromJSON(ctx context.Context, jsonData []byte, scopes []string) (*DefaultCredentials, error) {
- var f credentialsFile
- if err := json.Unmarshal(jsonData, &f); err != nil {
- return nil, err
- }
- ts, err := f.tokenSource(ctx, append([]string(nil), scopes...))
- if err != nil {
- return nil, err
- }
- return &DefaultCredentials{
- ProjectID: f.ProjectID,
- TokenSource: ts,
- JSON: jsonData,
- }, nil
-}
-
func wellKnownFile() string {
const f = "application_default_credentials.json"
if runtime.GOOS == "windows" {
@@ -111,5 +121,17 @@ func readCredentialsFile(ctx context.Context, filename string, scopes []string)
if err != nil {
return nil, err
}
- return CredentialsFromJSON(ctx, b, scopes...)
+ var f credentialsFile
+ if err := json.Unmarshal(b, &f); err != nil {
+ return nil, err
+ }
+ ts, err := f.tokenSource(ctx, append([]string(nil), scopes...))
+ if err != nil {
+ return nil, err
+ }
+ return &DefaultCredentials{
+ ProjectID: f.ProjectID,
+ TokenSource: ts,
+ JSON: b,
+ }, nil
}
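
With this downgrade, `FindDefaultCredentials` is exported directly (variadic scopes, returning `*DefaultCredentials`) rather than wrapped by per-Go-version files. A hedged usage sketch against that older surface; the cloud-platform scope URL is only an example:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()

	// Walks the search order documented above: env var, gcloud well-known
	// file, App Engine, then the GCE metadata server.
	creds, err := google.FindDefaultCredentials(ctx, "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("project:", creds.ProjectID)

	client := oauth2.NewClient(ctx, creds.TokenSource)
	_ = client // use with any Google API
}
```
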
diff --git a/vendor/golang.org/x/oauth2/google/doc_go19.go b/vendor/golang.org/x/oauth2/google/doc_go19.go
deleted file mode 100644
index 2a86325fe..000000000
--- a/vendor/golang.org/x/oauth2/google/doc_go19.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.9
-
-// Package google provides support for making OAuth2 authorized and authenticated
-// HTTP requests to Google APIs. It supports the Web server flow, client-side
-// credentials, service accounts, Google Compute Engine service accounts, and Google
-// App Engine service accounts.
-//
-// A brief overview of the package follows. For more information, please read
-// https://developers.google.com/accounts/docs/OAuth2
-// and
-// https://developers.google.com/accounts/docs/application-default-credentials.
-//
-// OAuth2 Configs
-//
-// Two functions in this package return golang.org/x/oauth2.Config values from Google credential
-// data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON,
-// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or
-// create an http.Client.
-//
-//
-// Credentials
-//
-// The Credentials type represents Google credentials, including Application Default
-// Credentials.
-//
-// Use FindDefaultCredentials to obtain Application Default Credentials.
-// FindDefaultCredentials looks in some well-known places for a credentials file, and
-// will call AppEngineTokenSource or ComputeTokenSource as needed.
-//
-// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials,
-// then use the credentials to construct an http.Client or an oauth2.TokenSource.
-//
-// Use CredentialsFromJSON to obtain credentials from either of the two JSON formats
-// described in OAuth2 Configs, above. The TokenSource in the returned value is the
-// same as the one obtained from the oauth2.Config returned from ConfigFromJSON or
-// JWTConfigFromJSON, but the Credentials may contain additional information
-// that is useful is some circumstances.
-package google // import "golang.org/x/oauth2/google"
diff --git a/vendor/golang.org/x/oauth2/google/doc_not_go19.go b/vendor/golang.org/x/oauth2/google/doc_not_go19.go
deleted file mode 100644
index 5c3c6e148..000000000
--- a/vendor/golang.org/x/oauth2/google/doc_not_go19.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.9
-
-// Package google provides support for making OAuth2 authorized and authenticated
-// HTTP requests to Google APIs. It supports the Web server flow, client-side
-// credentials, service accounts, Google Compute Engine service accounts, and Google
-// App Engine service accounts.
-//
-// A brief overview of the package follows. For more information, please read
-// https://developers.google.com/accounts/docs/OAuth2
-// and
-// https://developers.google.com/accounts/docs/application-default-credentials.
-//
-// OAuth2 Configs
-//
-// Two functions in this package return golang.org/x/oauth2.Config values from Google credential
-// data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON,
-// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or
-// create an http.Client.
-//
-//
-// Credentials
-//
-// The DefaultCredentials type represents Google Application Default Credentials, as
-// well as other forms of credential.
-//
-// Use FindDefaultCredentials to obtain Application Default Credentials.
-// FindDefaultCredentials looks in some well-known places for a credentials file, and
-// will call AppEngineTokenSource or ComputeTokenSource as needed.
-//
-// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials,
-// then use the credentials to construct an http.Client or an oauth2.TokenSource.
-//
-// Use CredentialsFromJSON to obtain credentials from either of the two JSON
-// formats described in OAuth2 Configs, above. (The DefaultCredentials returned may
-// not be "Application Default Credentials".) The TokenSource in the returned value
-// is the same as the one obtained from the oauth2.Config returned from
-// ConfigFromJSON or JWTConfigFromJSON, but the DefaultCredentials may contain
-// additional information that is useful is some circumstances.
-package google // import "golang.org/x/oauth2/google"
diff --git a/vendor/golang.org/x/oauth2/google/go19.go b/vendor/golang.org/x/oauth2/google/go19.go
deleted file mode 100644
index 4d0318b1e..000000000
--- a/vendor/golang.org/x/oauth2/google/go19.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.9
-
-package google
-
-import (
- "golang.org/x/net/context"
- "golang.org/x/oauth2"
-)
-
-// Credentials holds Google credentials, including "Application Default Credentials".
-// For more details, see:
-// https://developers.google.com/accounts/docs/application-default-credentials
-type Credentials struct {
- ProjectID string // may be empty
- TokenSource oauth2.TokenSource
-
- // JSON contains the raw bytes from a JSON credentials file.
- // This field may be nil if authentication is provided by the
- // environment and not with a credentials file, e.g. when code is
- // running on Google Cloud Platform.
- JSON []byte
-}
-
-// DefaultCredentials is the old name of Credentials.
-//
-// Deprecated: use Credentials instead.
-type DefaultCredentials = Credentials
-
-// FindDefaultCredentials searches for "Application Default Credentials".
-//
-// It looks for credentials in the following places,
-// preferring the first location found:
-//
-// 1. A JSON file whose path is specified by the
-// GOOGLE_APPLICATION_CREDENTIALS environment variable.
-// 2. A JSON file in a location known to the gcloud command-line tool.
-// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
-// On other systems, $HOME/.config/gcloud/application_default_credentials.json.
-// 3. On Google App Engine it uses the appengine.AccessToken function.
-// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches
-// credentials from the metadata server.
-// (In this final case any provided scopes are ignored.)
-func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) {
- return findDefaultCredentials(ctx, scopes)
-}
-
-// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can
-// represent either a Google Developers Console client_credentials.json file (as in
-// ConfigFromJSON) or a Google Developers service account key file (as in
-// JWTConfigFromJSON).
-func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) {
- return credentialsFromJSON(ctx, jsonData, scopes)
-}
diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go
index f7481fbcc..66a8b0e18 100644
--- a/vendor/golang.org/x/oauth2/google/google.go
+++ b/vendor/golang.org/x/oauth2/google/google.go
@@ -2,7 +2,17 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package google
+// Package google provides support for making OAuth2 authorized and
+// authenticated HTTP requests to Google APIs.
+// It supports the Web server flow, client-side credentials, service accounts,
+// Google Compute Engine service accounts, and Google App Engine service
+// accounts.
+//
+// For more information, please read
+// https://developers.google.com/accounts/docs/OAuth2
+// and
+// https://developers.google.com/accounts/docs/application-default-credentials.
+package google // import "golang.org/x/oauth2/google"
import (
"encoding/json"
diff --git a/vendor/golang.org/x/oauth2/google/not_go19.go b/vendor/golang.org/x/oauth2/google/not_go19.go
deleted file mode 100644
index 544e40624..000000000
--- a/vendor/golang.org/x/oauth2/google/not_go19.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.9
-
-package google
-
-import (
- "golang.org/x/net/context"
- "golang.org/x/oauth2"
-)
-
-// DefaultCredentials holds Google credentials, including "Application Default Credentials".
-// For more details, see:
-// https://developers.google.com/accounts/docs/application-default-credentials
-type DefaultCredentials struct {
- ProjectID string // may be empty
- TokenSource oauth2.TokenSource
-
- // JSON contains the raw bytes from a JSON credentials file.
- // This field may be nil if authentication is provided by the
- // environment and not with a credentials file, e.g. when code is
- // running on Google Cloud Platform.
- JSON []byte
-}
-
-// FindDefaultCredentials searches for "Application Default Credentials".
-//
-// It looks for credentials in the following places,
-// preferring the first location found:
-//
-// 1. A JSON file whose path is specified by the
-// GOOGLE_APPLICATION_CREDENTIALS environment variable.
-// 2. A JSON file in a location known to the gcloud command-line tool.
-// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
-// On other systems, $HOME/.config/gcloud/application_default_credentials.json.
-// 3. On Google App Engine it uses the appengine.AccessToken function.
-// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches
-// credentials from the metadata server.
-// (In this final case any provided scopes are ignored.)
-func FindDefaultCredentials(ctx context.Context, scopes ...string) (*DefaultCredentials, error) {
- return findDefaultCredentials(ctx, scopes)
-}
-
-// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can
-// represent either a Google Developers Console client_credentials.json file (as in
-// ConfigFromJSON) or a Google Developers service account key file (as in
-// JWTConfigFromJSON).
-//
-// Note: despite the name, the returned credentials may not be Application Default Credentials.
-func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*DefaultCredentials, error) {
- return credentialsFromJSON(ctx, jsonData, scopes)
-}
diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go
index b9660cadd..bdc18084b 100644
--- a/vendor/golang.org/x/oauth2/google/sdk.go
+++ b/vendor/golang.org/x/oauth2/google/sdk.go
@@ -5,11 +5,9 @@
package google
import (
- "bufio"
"encoding/json"
"errors"
"fmt"
- "io"
"net/http"
"os"
"os/user"
@@ -20,6 +18,7 @@ import (
"golang.org/x/net/context"
"golang.org/x/oauth2"
+ "golang.org/x/oauth2/internal"
)
type sdkCredentials struct {
@@ -77,7 +76,7 @@ func NewSDKConfig(account string) (*SDKConfig, error) {
return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
}
defer f.Close()
- ini, err := parseINI(f)
+ ini, err := internal.ParseINI(f)
if err != nil {
return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
}
@@ -147,34 +146,6 @@ func (c *SDKConfig) Scopes() []string {
return c.conf.Scopes
}
-func parseINI(ini io.Reader) (map[string]map[string]string, error) {
- result := map[string]map[string]string{
- "": {}, // root section
- }
- scanner := bufio.NewScanner(ini)
- currentSection := ""
- for scanner.Scan() {
- line := strings.TrimSpace(scanner.Text())
- if strings.HasPrefix(line, ";") {
- // comment.
- continue
- }
- if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
- currentSection = strings.TrimSpace(line[1 : len(line)-1])
- result[currentSection] = map[string]string{}
- continue
- }
- parts := strings.SplitN(line, "=", 2)
- if len(parts) == 2 && parts[0] != "" {
- result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
- }
- }
- if err := scanner.Err(); err != nil {
- return nil, fmt.Errorf("error scanning ini: %v", err)
- }
- return result, nil
-}
-
// sdkConfigPath tries to guess where the gcloud config is located.
// It can be overridden during tests.
var sdkConfigPath = func() (string, error) {
diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go
deleted file mode 100644
index 743487188..000000000
--- a/vendor/golang.org/x/oauth2/internal/client_appengine.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package internal
-
-import "google.golang.org/appengine/urlfetch"
-
-func init() {
- appengineClientHook = urlfetch.Client
-}
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go
index fc63fcab3..6978192a9 100644
--- a/vendor/golang.org/x/oauth2/internal/oauth2.go
+++ b/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -5,11 +5,14 @@
package internal
import (
+ "bufio"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
+ "io"
+ "strings"
)
// ParseKey converts the binary contents of a private key file
@@ -35,3 +38,38 @@ func ParseKey(key []byte) (*rsa.PrivateKey, error) {
}
return parsed, nil
}
+
+func ParseINI(ini io.Reader) (map[string]map[string]string, error) {
+ result := map[string]map[string]string{
+ "": {}, // root section
+ }
+ scanner := bufio.NewScanner(ini)
+ currentSection := ""
+ for scanner.Scan() {
+ line := strings.TrimSpace(scanner.Text())
+ if strings.HasPrefix(line, ";") {
+ // comment.
+ continue
+ }
+ if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+ currentSection = strings.TrimSpace(line[1 : len(line)-1])
+ result[currentSection] = map[string]string{}
+ continue
+ }
+ parts := strings.SplitN(line, "=", 2)
+ if len(parts) == 2 && parts[0] != "" {
+ result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("error scanning ini: %v", err)
+ }
+ return result, nil
+}
+
+func CondVal(v string) []string {
+ if v == "" {
+ return nil
+ }
+ return []string{v}
+}
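
`CondVal` is the older helper this downgrade reintroduces: an empty string maps to a nil slice, so optional query parameters drop out of `url.Values.Encode()`. Because the package is internal, the sketch below mirrors the helper locally rather than importing it:

```go
package main

import (
	"fmt"
	"net/url"
)

// condVal mirrors the internal.CondVal helper added above: an empty string
// becomes a nil slice, so the key contributes nothing to the encoded query.
func condVal(v string) []string {
	if v == "" {
		return nil
	}
	return []string{v}
}

func main() {
	v := url.Values{
		"response_type": {"code"},
		"client_id":     {"my-client"},
		"redirect_uri":  condVal(""),      // empty: encodes to nothing
		"state":         condVal("xyzzy"), // kept
	}
	fmt.Println(v.Encode()) // client_id=my-client&response_type=code&state=xyzzy
}
```
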
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
index 5c5451ad8..cf959ea69 100644
--- a/vendor/golang.org/x/oauth2/internal/token.go
+++ b/vendor/golang.org/x/oauth2/internal/token.go
@@ -6,7 +6,6 @@ package internal
import (
"encoding/json"
- "errors"
"fmt"
"io"
"io/ioutil"
@@ -21,7 +20,7 @@ import (
"golang.org/x/net/context/ctxhttp"
)
-// Token represents the credentials used to authorize
+// Token represents the crendentials used to authorize
// the requests to access protected resources on the OAuth 2.0
// provider's backend.
//
@@ -101,15 +100,12 @@ var brokenAuthHeaderProviders = []string{
"https://api.pushbullet.com/",
"https://api.soundcloud.com/",
"https://api.twitch.tv/",
- "https://id.twitch.tv/",
"https://app.box.com/",
- "https://api.box.com/",
"https://connect.stripe.com/",
- "https://login.mailchimp.com/",
+ "https://graph.facebook.com", // see https://github.com/golang/oauth2/issues/214
"https://login.microsoftonline.com/",
"https://login.salesforce.com/",
"https://login.windows.net",
- "https://login.live.com/",
"https://oauth.sandbox.trainingpeaks.com/",
"https://oauth.trainingpeaks.com/",
"https://oauth.vk.com/",
@@ -126,17 +122,10 @@ var brokenAuthHeaderProviders = []string{
"https://api.patreon.com/",
"https://sandbox.codeswholesale.com/oauth/token",
"https://api.sipgate.com/v1/authorization/oauth",
- "https://api.medium.com/v1/tokens",
- "https://log.finalsurge.com/oauth/token",
- "https://multisport.todaysplan.com.au/rest/oauth/access_token",
- "https://whats.todaysplan.com.au/rest/oauth/access_token",
- "https://stackoverflow.com/oauth/access_token",
- "https://account.health.nokia.com",
}
// brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints.
var brokenAuthHeaderDomains = []string{
- ".auth0.com",
".force.com",
".myshopify.com",
".okta.com",
@@ -179,6 +168,10 @@ func providerAuthHeaderWorks(tokenURL string) bool {
}
func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) {
+ hc, err := ContextClient(ctx)
+ if err != nil {
+ return nil, err
+ }
bustedAuth := !providerAuthHeaderWorks(tokenURL)
if bustedAuth {
if clientID != "" {
@@ -196,7 +189,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string,
if !bustedAuth {
req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret))
}
- r, err := ctxhttp.Do(ctx, ContextClient(ctx), req)
+ r, err := ctxhttp.Do(ctx, hc, req)
if err != nil {
return nil, err
}
@@ -206,10 +199,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string,
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
}
if code := r.StatusCode; code < 200 || code > 299 {
- return nil, &RetrieveError{
- Response: r,
- Body: body,
- }
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body)
}
var token *Token
@@ -256,17 +246,5 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string,
if token.RefreshToken == "" {
token.RefreshToken = v.Get("refresh_token")
}
- if token.AccessToken == "" {
- return token, errors.New("oauth2: server response missing access_token")
- }
return token, nil
}
-
-type RetrieveError struct {
- Response *http.Response
- Body []byte
-}
-
-func (r *RetrieveError) Error() string {
- return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
-}
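
The downgraded token code keeps client credentials out of the Basic auth header for providers on the lists above and reports failures as plain formatted errors instead of `*RetrieveError`. At this vintage the public package also exposed `RegisterBrokenAuthHeaderProvider` for endpoints missing from the built-in list; a hedged sketch, assuming that function is present in the pinned version (the provider URL is hypothetical):

```go
package main

import "golang.org/x/oauth2"

func main() {
	// Hypothetical provider URL: registering it makes RetrieveToken send
	// client_id/client_secret in the POST body instead of the
	// Authorization: Basic header, matching the built-in lists above.
	oauth2.RegisterBrokenAuthHeaderProvider("https://idp.example.com/")
}
```
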
diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go
index d16f9ae1f..783bd98c8 100644
--- a/vendor/golang.org/x/oauth2/internal/transport.go
+++ b/vendor/golang.org/x/oauth2/internal/transport.go
@@ -19,16 +19,50 @@ var HTTPClient ContextKey
// because nobody else can create a ContextKey, being unexported.
type ContextKey struct{}
-var appengineClientHook func(context.Context) *http.Client
+// ContextClientFunc is a func which tries to return an *http.Client
+// given a Context value. If it returns an error, the search stops
+// with that error. If it returns (nil, nil), the search continues
+// down the list of registered funcs.
+type ContextClientFunc func(context.Context) (*http.Client, error)
-func ContextClient(ctx context.Context) *http.Client {
+var contextClientFuncs []ContextClientFunc
+
+func RegisterContextClientFunc(fn ContextClientFunc) {
+ contextClientFuncs = append(contextClientFuncs, fn)
+}
+
+func ContextClient(ctx context.Context) (*http.Client, error) {
if ctx != nil {
if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
- return hc
+ return hc, nil
}
}
- if appengineClientHook != nil {
- return appengineClientHook(ctx)
+ for _, fn := range contextClientFuncs {
+ c, err := fn(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if c != nil {
+ return c, nil
+ }
}
- return http.DefaultClient
+ return http.DefaultClient, nil
+}
+
+func ContextTransport(ctx context.Context) http.RoundTripper {
+ hc, err := ContextClient(ctx)
+ // This is a rare error case (somebody using nil on App Engine).
+ if err != nil {
+ return ErrorTransport{err}
+ }
+ return hc.Transport
+}
+
+// ErrorTransport returns the specified error on RoundTrip.
+// This RoundTripper should be used in rare error cases where
+// error handling can be postponed to response handling time.
+type ErrorTransport struct{ Err error }
+
+func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) {
+ return nil, t.Err
}
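
`ErrorTransport`, reintroduced here, defers a client-construction error to the point where the caller already handles response errors. A self-contained sketch of the same idea (the `errorTransport` type below mirrors the internal one rather than importing it):

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// errorTransport mirrors internal.ErrorTransport above: every request fails
// with a fixed error, so a construction-time problem surfaces where the
// caller already handles response errors.
type errorTransport struct{ err error }

func (t errorTransport) RoundTrip(*http.Request) (*http.Response, error) {
	return nil, t.err
}

func main() {
	client := &http.Client{Transport: errorTransport{err: errors.New("no credentials configured")}}
	_, err := client.Get("https://example.com/")
	fmt.Println(err) // the RoundTrip error surfaces here
}
```
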
diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go
index e08f31595..e016db421 100644
--- a/vendor/golang.org/x/oauth2/jwt/jwt.go
+++ b/vendor/golang.org/x/oauth2/jwt/jwt.go
@@ -124,10 +124,7 @@ func (js jwtSource) Token() (*oauth2.Token, error) {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
}
if c := resp.StatusCode; c < 200 || c > 299 {
- return nil, &oauth2.RetrieveError{
- Response: resp,
- Body: body,
- }
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body)
}
// tokenRes is the JSON response body.
var tokenRes struct {
diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go
index 16775d081..4bafe873d 100644
--- a/vendor/golang.org/x/oauth2/oauth2.go
+++ b/vendor/golang.org/x/oauth2/oauth2.go
@@ -3,8 +3,7 @@
// license that can be found in the LICENSE file.
// Package oauth2 provides support for making
-// OAuth2 authorized and authenticated HTTP requests,
-// as specified in RFC 6749.
+// OAuth2 authorized and authenticated HTTP requests.
// It can additionally grant authorization with Bearer JWT.
package oauth2 // import "golang.org/x/oauth2"
@@ -118,30 +117,21 @@ func SetAuthURLParam(key, value string) AuthCodeOption {
// that asks for permissions for the required scopes explicitly.
//
// State is a token to protect the user from CSRF attacks. You must
-// always provide a non-empty string and validate that it matches the
+// always provide a non-zero string and validate that it matches the
// the state query parameter on your redirect callback.
// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
//
// Opts may include AccessTypeOnline or AccessTypeOffline, as well
// as ApprovalForce.
-// It can also be used to pass the PKCE challange.
-// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
var buf bytes.Buffer
buf.WriteString(c.Endpoint.AuthURL)
v := url.Values{
"response_type": {"code"},
"client_id": {c.ClientID},
- }
- if c.RedirectURL != "" {
- v.Set("redirect_uri", c.RedirectURL)
- }
- if len(c.Scopes) > 0 {
- v.Set("scope", strings.Join(c.Scopes, " "))
- }
- if state != "" {
- // TODO(light): Docs say never to omit state; don't allow empty.
- v.Set("state", state)
+ "redirect_uri": internal.CondVal(c.RedirectURL),
+ "scope": internal.CondVal(strings.Join(c.Scopes, " ")),
+ "state": internal.CondVal(state),
}
for _, opt := range opts {
opt.setValue(v)
@@ -167,15 +157,12 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
// The HTTP client to use is derived from the context.
// If nil, http.DefaultClient is used.
func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
- v := url.Values{
+ return retrieveToken(ctx, c, url.Values{
"grant_type": {"password"},
"username": {username},
"password": {password},
- }
- if len(c.Scopes) > 0 {
- v.Set("scope", strings.Join(c.Scopes, " "))
- }
- return retrieveToken(ctx, c, v)
+ "scope": internal.CondVal(strings.Join(c.Scopes, " ")),
+ })
}
// Exchange converts an authorization code into a token.
@@ -188,21 +175,12 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor
//
// The code will be in the *http.Request.FormValue("code"). Before
// calling Exchange, be sure to validate FormValue("state").
-//
-// Opts may include the PKCE verifier code if previously used in AuthCodeURL.
-// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
-func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) {
- v := url.Values{
- "grant_type": {"authorization_code"},
- "code": {code},
- }
- if c.RedirectURL != "" {
- v.Set("redirect_uri", c.RedirectURL)
- }
- for _, opt := range opts {
- opt.setValue(v)
- }
- return retrieveToken(ctx, c, v)
+func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) {
+ return retrieveToken(ctx, c, url.Values{
+ "grant_type": {"authorization_code"},
+ "code": {code},
+ "redirect_uri": internal.CondVal(c.RedirectURL),
+ })
}
// Client returns an HTTP client using the provided token.
@@ -322,11 +300,15 @@ var HTTPClient internal.ContextKey
// packages.
func NewClient(ctx context.Context, src TokenSource) *http.Client {
if src == nil {
- return internal.ContextClient(ctx)
+ c, err := internal.ContextClient(ctx)
+ if err != nil {
+ return &http.Client{Transport: internal.ErrorTransport{Err: err}}
+ }
+ return c
}
return &http.Client{
Transport: &Transport{
- Base: internal.ContextClient(ctx).Transport,
+ Base: internal.ContextTransport(ctx),
Source: ReuseTokenSource(nil, src),
},
}
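
For orientation, here is a minimal three-legged flow against the API restored above. Note that at this revision Exchange takes no AuthCodeOptions, so a PKCE verifier cannot be passed, and empty RedirectURL/Scopes/state values are dropped through internal.CondVal rather than explicit checks. All endpoint values and the state/code strings are placeholders.

package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "client-id",              // placeholder
		ClientSecret: "client-secret",          // placeholder
		RedirectURL:  "https://example.com/cb", // placeholder
		Scopes:       []string{"profile"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/auth",  // placeholder
			TokenURL: "https://provider.example.com/token", // placeholder
		},
	}

	// Step 1: send the user to the consent page.
	url := conf.AuthCodeURL("random-state", oauth2.AccessTypeOffline)
	fmt.Println("visit:", url)

	// Step 2: after the callback returns ?code=...&state=..., exchange it.
	tok, err := conf.Exchange(context.Background(), "code-from-callback")
	if err != nil {
		fmt.Println("exchange failed:", err) // expected here, the endpoint is fake
		return
	}
	fmt.Println("access token expires at:", tok.Expiry)
}
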
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
index 34db8cdc8..7a3167f15 100644
--- a/vendor/golang.org/x/oauth2/token.go
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -5,7 +5,6 @@
package oauth2
import (
- "fmt"
"net/http"
"net/url"
"strconv"
@@ -21,7 +20,7 @@ import (
// expirations due to client-server time mismatches.
const expiryDelta = 10 * time.Second
-// Token represents the credentials used to authorize
+// Token represents the credentials used to authorize
// the requests to access protected resources on the OAuth 2.0
// provider's backend.
//
@@ -124,7 +123,7 @@ func (t *Token) expired() bool {
if t.Expiry.IsZero() {
return false
}
- return t.Expiry.Round(0).Add(-expiryDelta).Before(time.Now())
+ return t.Expiry.Add(-expiryDelta).Before(time.Now())
}
// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
@@ -153,23 +152,7 @@ func tokenFromInternal(t *internal.Token) *Token {
func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
if err != nil {
- if rErr, ok := err.(*internal.RetrieveError); ok {
- return nil, (*RetrieveError)(rErr)
- }
return nil, err
}
return tokenFromInternal(tk), nil
}
-
-// RetrieveError is the error returned when the token endpoint returns a
-// non-2XX HTTP status code.
-type RetrieveError struct {
- Response *http.Response
- // Body is the body that was consumed by reading Response.Body.
- // It may be truncated.
- Body []byte
-}
-
-func (r *RetrieveError) Error() string {
- return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
-}
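
The expired() change above drops the Round(0) call, which is what strips a monotonic clock reading from Expiry before comparing against the wall clock. That only matters for tokens whose Expiry was derived from time.Now() in-process; a small standalone demonstration of the difference:

package main

import (
	"fmt"
	"time"
)

func main() {
	const expiryDelta = 10 * time.Second

	expiry := time.Now().Add(time.Hour) // carries a monotonic reading
	fmt.Println(expiry)                 // printed with an "m=+..." suffix
	fmt.Println(expiry.Round(0))        // Round(0) strips the monotonic part

	// The check as it reads at this vendored revision (no Round(0)):
	expired := expiry.Add(-expiryDelta).Before(time.Now())
	fmt.Println("expired:", expired) // false: the token is an hour away
}
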
diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go
index aa0d34f1e..92ac7e253 100644
--- a/vendor/golang.org/x/oauth2/transport.go
+++ b/vendor/golang.org/x/oauth2/transport.go
@@ -31,17 +31,9 @@ type Transport struct {
}
// RoundTrip authorizes and authenticates the request with an
-// access token from Transport's Source.
+// access token. If no token exists or token is expired,
+// tries to refresh/fetch a new token.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
- reqBodyClosed := false
- if req.Body != nil {
- defer func() {
- if !reqBodyClosed {
- req.Body.Close()
- }
- }()
- }
-
if t.Source == nil {
return nil, errors.New("oauth2: Transport's Source is nil")
}
@@ -54,10 +46,6 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
token.SetAuthHeader(req2)
t.setModReq(req, req2)
res, err := t.base().RoundTrip(req2)
-
- // req.Body is assumed to have been closed by the base RoundTripper.
- reqBodyClosed = true
-
if err != nil {
t.setModReq(req, nil)
return nil, err
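
The RoundTrip hunk above removes the bookkeeping that guaranteed req.Body is closed when the token source fails, so at this revision callers should not rely on that. The wiring is otherwise unchanged; a sketch of the usual setup with a static placeholder token:

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "placeholder-token"})
	client := &http.Client{
		Transport: &oauth2.Transport{
			Source: oauth2.ReuseTokenSource(nil, src),
			// Base is nil, so http.DefaultTransport is used underneath.
		},
	}
	resp, err := client.Get("https://example.com/") // placeholder URL
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
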
diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
index 27f92c99d..5e5fb4510 100644
--- a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
+++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
@@ -6,17 +6,7 @@
// +build ppc
// Functions to access/create device major and minor numbers matching the
-// encoding used by the Linux kernel and glibc.
-//
-// The information below is extracted and adapted from bits/sysmacros.h in the
-// glibc sources:
-//
-// dev_t in glibc is 64-bit, with 32-bit major and minor numbers. glibc's
-// default encoding is MMMM Mmmm mmmM MMmm, where M is a hex digit of the major
-// number and m is a hex digit of the minor number. This is backward compatible
-// with legacy systems where dev_t is 16 bits wide, encoded as MMmm. It is also
-// backward compatible with the Linux kernel, which for some architectures uses
-// 32-bit dev_t, encoded as mmmM MMmm.
+// encoding used by AIX.
package unix
diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
index 65d93a689..8b401244c 100644
--- a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
@@ -6,17 +6,7 @@
// +build ppc64
// Functions to access/create device major and minor numbers matching the
-// encoding used by the Linux kernel and glibc.
-//
-// The information below is extracted and adapted from bits/sysmacros.h in the
-// glibc sources:
-//
-// dev_t in glibc is 64-bit, with 32-bit major and minor numbers. glibc's
-// default encoding is MMMM Mmmm mmmM MMmm, where M is a hex digit of the major
-// number and m is a hex digit of the minor number. This is backward compatible
-// with legacy systems where dev_t is 16 bits wide, encoded as MMmm. It is also
-// backward compatible with the Linux kernel, which for some architectures uses
-// 32-bit dev_t, encoded as mmmM MMmm.
+// encoding used by AIX.
package unix
diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go
index 8c9aaeb27..f121a8d64 100644
--- a/vendor/golang.org/x/sys/unix/ioctl.go
+++ b/vendor/golang.org/x/sys/unix/ioctl.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package unix
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index a2baa818f..ec8e61322 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -46,6 +46,7 @@ includes_AIX='
#include
#include
#include
+#include
#include
#include
@@ -86,6 +87,7 @@ includes_DragonFly='
#include
#include
#include
+#include
#include
#include
#include
@@ -193,6 +195,7 @@ struct ltchars {
#include
#include
#include
+#include
#include
#include
#include
@@ -214,6 +217,7 @@ struct ltchars {
#include
#include
#include
+#include
#include
#include
#include
@@ -244,6 +248,16 @@ struct ltchars {
#define FS_KEY_DESC_PREFIX "fscrypt:"
#define FS_KEY_DESC_PREFIX_SIZE 8
#define FS_MAX_KEY_SIZE 64
+
+// XDP socket constants do not appear to be picked up otherwise.
+// Copied from samples/bpf/xdpsock_user.c.
+#ifndef SOL_XDP
+#define SOL_XDP 283
+#endif
+
+#ifndef AF_XDP
+#define AF_XDP 44
+#endif
'
includes_NetBSD='
@@ -252,6 +266,7 @@ includes_NetBSD='
#include
#include
#include
+#include
#include
#include
#include
@@ -277,6 +292,7 @@ includes_OpenBSD='
#include
#include
#include
+#include
#include
#include
#include
@@ -378,6 +394,7 @@ ccflags="$@"
$2 ~ /^EXTATTR_NAMESPACE_NAMES/ ||
$2 ~ /^EXTATTR_NAMESPACE_[A-Z]+_STRING/ {next}
+ $2 !~ /^ECCAPBITS/ &&
$2 !~ /^ETH_/ &&
$2 !~ /^EPROC_/ &&
$2 !~ /^EQUIV_/ &&
@@ -413,7 +430,7 @@ ccflags="$@"
$2 ~ /^TC[IO](ON|OFF)$/ ||
$2 ~ /^IN_/ ||
$2 ~ /^LOCK_(SH|EX|NB|UN)$/ ||
- $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|T?PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ ||
+ $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ ||
$2 ~ /^TP_STATUS_/ ||
$2 ~ /^FALLOC_/ ||
$2 == "ICMPV6_FILTER" ||
@@ -424,6 +441,7 @@ ccflags="$@"
$2 ~ /^KERN_(HOSTNAME|OS(RELEASE|TYPE)|VERSION)$/ ||
$2 ~ /^HW_MACHINE$/ ||
$2 ~ /^SYSCTL_VERS/ ||
+ $2 !~ "MNT_BITS" &&
$2 ~ /^(MS|MNT|UMOUNT)_/ ||
$2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ ||
$2 ~ /^(O|F|E?FD|NAME|S|PTRACE|PT)_/ ||
@@ -474,6 +492,7 @@ ccflags="$@"
$2 ~ /^FSOPT_/ ||
$2 ~ /^WDIOC_/ ||
$2 ~ /^NFN/ ||
+ $2 ~ /^XDP_/ ||
$2 ~ /^(HDIO|WIN|SMART)_/ ||
$2 !~ "WMESGLEN" &&
$2 ~ /^W[A-Z0-9]+$/ ||
diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go
index 86d7c7bd0..df1f9ea3d 100644
--- a/vendor/golang.org/x/sys/unix/syscall_aix.go
+++ b/vendor/golang.org/x/sys/unix/syscall_aix.go
@@ -345,11 +345,11 @@ func IoctlSetInt(fd int, req uint, value int) error {
return ioctl(fd, req, uintptr(value))
}
-func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
+func ioctlSetWinsize(fd int, req uint, value *Winsize) error {
return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
}
-func IoctlSetTermios(fd int, req uint, value *Termios) error {
+func ioctlSetTermios(fd int, req uint, value *Termios) error {
return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
}
@@ -419,8 +419,10 @@ func Flock(fd int, how int) (err error) {
//sysnb Getsid(pid int) (sid int, err error)
//sysnb Kill(pid int, sig syscall.Signal) (err error)
//sys Klogctl(typ int, buf []byte) (n int, err error) = syslog
+//sys Mkdir(dirfd int, path string, mode uint32) (err error)
//sys Mkdirat(dirfd int, path string, mode uint32) (err error)
//sys Mkfifo(path string, mode uint32) (err error)
+//sys Mknod(path string, mode uint32, dev int) (err error)
//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
//sys Open(path string, mode int, perm uint32) (fd int, err error) = open64
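
The two renames above (ioctlSetWinsize/ioctlSetTermios) exist so the portable helpers in ioctl.go, which the earlier build-tag change now compiles on AIX as well, can wrap the raw ioctl. A hedged sketch using the Linux/AIX request names TCGETS/TCSETS (the zerrors hunks below add them for AIX); the BSDs use TIOCGETA/TIOCSETA instead, and stdin must be a real terminal for this to do anything:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	fd := int(os.Stdin.Fd())
	tio, err := unix.IoctlGetTermios(fd, unix.TCGETS)
	if err != nil {
		fmt.Println("stdin is not a terminal:", err)
		return
	}
	tio.Lflag &^= unix.ECHO // turn echo off purely as a demonstration
	if err := unix.IoctlSetTermios(fd, unix.TCSETS, tio); err != nil {
		fmt.Println("TCSETS failed:", err)
		return
	}
	fmt.Println("echo disabled; run `stty echo` to restore it")
}
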
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index eb6335402..6c9296215 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -692,6 +692,24 @@ func (sa *SockaddrVM) sockaddr() (unsafe.Pointer, _Socklen, error) {
return unsafe.Pointer(&sa.raw), SizeofSockaddrVM, nil
}
+type SockaddrXDP struct {
+ Flags uint16
+ Ifindex uint32
+ QueueID uint32
+ SharedUmemFD uint32
+ raw RawSockaddrXDP
+}
+
+func (sa *SockaddrXDP) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ sa.raw.Family = AF_XDP
+ sa.raw.Flags = sa.Flags
+ sa.raw.Ifindex = sa.Ifindex
+ sa.raw.Queue_id = sa.QueueID
+ sa.raw.Shared_umem_fd = sa.SharedUmemFD
+
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrXDP, nil
+}
+
func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
switch rsa.Addr.Family {
case AF_NETLINK:
@@ -793,6 +811,15 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
}
return sa, nil
}
+ case AF_XDP:
+ pp := (*RawSockaddrXDP)(unsafe.Pointer(rsa))
+ sa := &SockaddrXDP{
+ Flags: pp.Flags,
+ Ifindex: pp.Ifindex,
+ QueueID: pp.Queue_id,
+ SharedUmemFD: pp.Shared_umem_fd,
+ }
+ return sa, nil
}
return nil, EAFNOSUPPORT
}
@@ -1320,6 +1347,7 @@ func Getpgrp() (pid int) {
//sys Llistxattr(path string, dest []byte) (sz int, err error)
//sys Lremovexattr(path string, attr string) (err error)
//sys Lsetxattr(path string, attr string, data []byte, flags int) (err error)
+//sys MemfdCreate(name string, flags int) (fd int, err error)
//sys Mkdirat(dirfd int, path string, mode uint32) (err error)
//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
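
SockaddrXDP above is the bind address for the new AF_XDP socket family (the matching AF_XDP/SOL_XDP/XDP_* constants appear in the zerrors hunks further down). A hedged, Linux-only sketch of the address wiring; a real setup must also register a UMEM and rings via setsockopt on SOL_XDP before bind succeeds, and needs root or CAP_NET_RAW. The interface name and queue are placeholders.

package main

import (
	"fmt"
	"net"

	"golang.org/x/sys/unix"
)

func main() {
	ifi, err := net.InterfaceByName("eth0") // placeholder interface
	if err != nil {
		fmt.Println("interface lookup failed:", err)
		return
	}

	fd, err := unix.Socket(unix.AF_XDP, unix.SOCK_RAW, 0)
	if err != nil {
		fmt.Println("socket failed:", err) // e.g. EAFNOSUPPORT on older kernels
		return
	}
	defer unix.Close(fd)

	sa := &unix.SockaddrXDP{
		Flags:   unix.XDP_COPY, // force copy mode; XDP_ZEROCOPY is the other option
		Ifindex: uint32(ifi.Index),
		QueueID: 0, // placeholder queue
	}
	if err := unix.Bind(fd, sa); err != nil {
		// Expected without a registered UMEM; shown only for the wiring.
		fmt.Println("bind failed:", err)
		return
	}
	fmt.Println("bound AF_XDP socket, fd =", fd)
}
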
diff --git a/vendor/golang.org/x/sys/unix/types_aix.go b/vendor/golang.org/x/sys/unix/types_aix.go
index 355ad6cf5..18fbddd52 100644
--- a/vendor/golang.org/x/sys/unix/types_aix.go
+++ b/vendor/golang.org/x/sys/unix/types_aix.go
@@ -22,6 +22,11 @@ package unix
#include
#include
#include
+#include
+#include
+#include
+#include
+#include
#include
@@ -33,7 +38,6 @@ package unix
#include
#include
-#include
enum {
sizeofPtr = sizeof(void*),
@@ -224,6 +228,9 @@ type Flock_t C.struct_flock64
// Statfs
+type Fsid_t C.struct_fsid_t
+type Fsid64_t C.struct_fsid64_t
+
type Statfs_t C.struct_statfs
const RNDGETENTCNT = 0x80045200
diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go
index 17c1537f2..4b7b96502 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go
@@ -867,6 +867,9 @@ const (
TAB2 = 0x800
TAB3 = 0xc00
TABDLY = 0xc00
+ TCFLSH = 0x540c
+ TCGETA = 0x5405
+ TCGETS = 0x5401
TCIFLUSH = 0x0
TCIOFF = 0x2
TCIOFLUSH = 0x2
@@ -915,6 +918,15 @@ const (
TCP_TIMESTAMP_OPTLEN = 0xc
TCP_UNSETPRIV = 0x28
TCSAFLUSH = 0x2
+ TCSBRK = 0x5409
+ TCSETA = 0x5406
+ TCSETAF = 0x5408
+ TCSETAW = 0x5407
+ TCSETS = 0x5402
+ TCSETSF = 0x5404
+ TCSETSW = 0x5403
+ TCXONC = 0x540b
+ TIOC = 0x5400
TIOCCBRK = 0x2000747a
TIOCCDTR = 0x20007478
TIOCCONS = 0x80047462
diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go
index fdfd25ccd..ed04fd1b7 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go
@@ -867,6 +867,9 @@ const (
TAB2 = 0x800
TAB3 = 0xc00
TABDLY = 0xc00
+ TCFLSH = 0x540c
+ TCGETA = 0x5405
+ TCGETS = 0x5401
TCIFLUSH = 0x0
TCIOFF = 0x2
TCIOFLUSH = 0x2
@@ -915,6 +918,15 @@ const (
TCP_TIMESTAMP_OPTLEN = 0xc
TCP_UNSETPRIV = 0x28
TCSAFLUSH = 0x2
+ TCSBRK = 0x5409
+ TCSETA = 0x5406
+ TCSETAF = 0x5408
+ TCSETAW = 0x5407
+ TCSETS = 0x5402
+ TCSETSF = 0x5404
+ TCSETSW = 0x5403
+ TCXONC = 0x540b
+ TIOC = 0x5400
TIOCCBRK = 0x2000747a
TIOCCDTR = 0x20007478
TIOCCONS = 0xffffffff80047462
diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go
index 1de699894..bbe6089bb 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go
@@ -880,6 +880,40 @@ const (
MAP_VPAGETABLE = 0x2000
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
+ MNT_ASYNC = 0x40
+ MNT_AUTOMOUNTED = 0x20
+ MNT_CMDFLAGS = 0xf0000
+ MNT_DEFEXPORTED = 0x200
+ MNT_DELEXPORT = 0x20000
+ MNT_EXKERB = 0x800
+ MNT_EXPORTANON = 0x400
+ MNT_EXPORTED = 0x100
+ MNT_EXPUBLIC = 0x20000000
+ MNT_EXRDONLY = 0x80
+ MNT_FORCE = 0x80000
+ MNT_IGNORE = 0x800000
+ MNT_LAZY = 0x4
+ MNT_LOCAL = 0x1000
+ MNT_NOATIME = 0x10000000
+ MNT_NOCLUSTERR = 0x40000000
+ MNT_NOCLUSTERW = 0x80000000
+ MNT_NODEV = 0x10
+ MNT_NOEXEC = 0x4
+ MNT_NOSUID = 0x8
+ MNT_NOSYMFOLLOW = 0x400000
+ MNT_NOWAIT = 0x2
+ MNT_QUOTA = 0x2000
+ MNT_RDONLY = 0x1
+ MNT_RELOAD = 0x40000
+ MNT_ROOTFS = 0x4000
+ MNT_SOFTDEP = 0x200000
+ MNT_SUIDDIR = 0x100000
+ MNT_SYNCHRONOUS = 0x2
+ MNT_TRIM = 0x1000000
+ MNT_UPDATE = 0x10000
+ MNT_USER = 0x8000
+ MNT_VISFLAGMASK = 0xf1f0ffff
+ MNT_WAIT = 0x1
MSG_CMSG_CLOEXEC = 0x1000
MSG_CTRUNC = 0x20
MSG_DONTROUTE = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index f33613ee9..fe564160b 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
+ AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@@ -977,6 +978,21 @@ const (
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
+ MFD_ALLOW_SEALING = 0x2
+ MFD_CLOEXEC = 0x1
+ MFD_HUGETLB = 0x4
+ MFD_HUGE_16GB = -0x78000000
+ MFD_HUGE_16MB = 0x60000000
+ MFD_HUGE_1GB = 0x78000000
+ MFD_HUGE_1MB = 0x50000000
+ MFD_HUGE_256MB = 0x70000000
+ MFD_HUGE_2GB = 0x7c000000
+ MFD_HUGE_2MB = 0x54000000
+ MFD_HUGE_512KB = 0x4c000000
+ MFD_HUGE_64KB = 0x40000000
+ MFD_HUGE_8MB = 0x5c000000
+ MFD_HUGE_MASK = 0x3f
+ MFD_HUGE_SHIFT = 0x1a
MINIX2_SUPER_MAGIC = 0x2468
MINIX2_SUPER_MAGIC2 = 0x2478
MINIX3_SUPER_MAGIC = 0x4d5a
@@ -1796,6 +1812,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
+ SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1e
SO_ATTACH_BPF = 0x32
@@ -2278,6 +2295,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
+ XDP_COPY = 0x2
+ XDP_FLAGS_DRV_MODE = 0x4
+ XDP_FLAGS_HW_MODE = 0x8
+ XDP_FLAGS_MASK = 0xf
+ XDP_FLAGS_MODES = 0xe
+ XDP_FLAGS_SKB_MODE = 0x2
+ XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
+ XDP_MMAP_OFFSETS = 0x1
+ XDP_PGOFF_RX_RING = 0x0
+ XDP_PGOFF_TX_RING = 0x80000000
+ XDP_RX_RING = 0x2
+ XDP_SHARED_UMEM = 0x1
+ XDP_STATISTICS = 0x7
+ XDP_TX_RING = 0x3
+ XDP_UMEM_COMPLETION_RING = 0x6
+ XDP_UMEM_FILL_RING = 0x5
+ XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
+ XDP_UMEM_PGOFF_FILL_RING = 0x100000000
+ XDP_UMEM_REG = 0x4
+ XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0x1800
ZSMALLOC_MAGIC = 0x58295829
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index ccdbebf6b..dcfa66749 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
+ AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@@ -977,6 +978,21 @@ const (
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
+ MFD_ALLOW_SEALING = 0x2
+ MFD_CLOEXEC = 0x1
+ MFD_HUGETLB = 0x4
+ MFD_HUGE_16GB = -0x78000000
+ MFD_HUGE_16MB = 0x60000000
+ MFD_HUGE_1GB = 0x78000000
+ MFD_HUGE_1MB = 0x50000000
+ MFD_HUGE_256MB = 0x70000000
+ MFD_HUGE_2GB = 0x7c000000
+ MFD_HUGE_2MB = 0x54000000
+ MFD_HUGE_512KB = 0x4c000000
+ MFD_HUGE_64KB = 0x40000000
+ MFD_HUGE_8MB = 0x5c000000
+ MFD_HUGE_MASK = 0x3f
+ MFD_HUGE_SHIFT = 0x1a
MINIX2_SUPER_MAGIC = 0x2468
MINIX2_SUPER_MAGIC2 = 0x2478
MINIX3_SUPER_MAGIC = 0x4d5a
@@ -1797,6 +1813,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
+ SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1e
SO_ATTACH_BPF = 0x32
@@ -2278,6 +2295,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
+ XDP_COPY = 0x2
+ XDP_FLAGS_DRV_MODE = 0x4
+ XDP_FLAGS_HW_MODE = 0x8
+ XDP_FLAGS_MASK = 0xf
+ XDP_FLAGS_MODES = 0xe
+ XDP_FLAGS_SKB_MODE = 0x2
+ XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
+ XDP_MMAP_OFFSETS = 0x1
+ XDP_PGOFF_RX_RING = 0x0
+ XDP_PGOFF_TX_RING = 0x80000000
+ XDP_RX_RING = 0x2
+ XDP_SHARED_UMEM = 0x1
+ XDP_STATISTICS = 0x7
+ XDP_TX_RING = 0x3
+ XDP_UMEM_COMPLETION_RING = 0x6
+ XDP_UMEM_FILL_RING = 0x5
+ XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
+ XDP_UMEM_PGOFF_FILL_RING = 0x100000000
+ XDP_UMEM_REG = 0x4
+ XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0x1800
ZSMALLOC_MAGIC = 0x58295829
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index 770d4c735..c2ef50c65 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
+ AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@@ -975,6 +976,21 @@ const (
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
+ MFD_ALLOW_SEALING = 0x2
+ MFD_CLOEXEC = 0x1
+ MFD_HUGETLB = 0x4
+ MFD_HUGE_16GB = -0x78000000
+ MFD_HUGE_16MB = 0x60000000
+ MFD_HUGE_1GB = 0x78000000
+ MFD_HUGE_1MB = 0x50000000
+ MFD_HUGE_256MB = 0x70000000
+ MFD_HUGE_2GB = 0x7c000000
+ MFD_HUGE_2MB = 0x54000000
+ MFD_HUGE_512KB = 0x4c000000
+ MFD_HUGE_64KB = 0x40000000
+ MFD_HUGE_8MB = 0x5c000000
+ MFD_HUGE_MASK = 0x3f
+ MFD_HUGE_SHIFT = 0x1a
MINIX2_SUPER_MAGIC = 0x2468
MINIX2_SUPER_MAGIC2 = 0x2478
MINIX3_SUPER_MAGIC = 0x4d5a
@@ -1803,6 +1819,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
+ SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1e
SO_ATTACH_BPF = 0x32
@@ -2284,6 +2301,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
+ XDP_COPY = 0x2
+ XDP_FLAGS_DRV_MODE = 0x4
+ XDP_FLAGS_HW_MODE = 0x8
+ XDP_FLAGS_MASK = 0xf
+ XDP_FLAGS_MODES = 0xe
+ XDP_FLAGS_SKB_MODE = 0x2
+ XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
+ XDP_MMAP_OFFSETS = 0x1
+ XDP_PGOFF_RX_RING = 0x0
+ XDP_PGOFF_TX_RING = 0x80000000
+ XDP_RX_RING = 0x2
+ XDP_SHARED_UMEM = 0x1
+ XDP_STATISTICS = 0x7
+ XDP_TX_RING = 0x3
+ XDP_UMEM_COMPLETION_RING = 0x6
+ XDP_UMEM_FILL_RING = 0x5
+ XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
+ XDP_UMEM_PGOFF_FILL_RING = 0x100000000
+ XDP_UMEM_REG = 0x4
+ XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0x1800
ZSMALLOC_MAGIC = 0x58295829
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index c3e311dbb..1a820d668 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
+ AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@@ -978,6 +979,21 @@ const (
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
+ MFD_ALLOW_SEALING = 0x2
+ MFD_CLOEXEC = 0x1
+ MFD_HUGETLB = 0x4
+ MFD_HUGE_16GB = -0x78000000
+ MFD_HUGE_16MB = 0x60000000
+ MFD_HUGE_1GB = 0x78000000
+ MFD_HUGE_1MB = 0x50000000
+ MFD_HUGE_256MB = 0x70000000
+ MFD_HUGE_2GB = 0x7c000000
+ MFD_HUGE_2MB = 0x54000000
+ MFD_HUGE_512KB = 0x4c000000
+ MFD_HUGE_64KB = 0x40000000
+ MFD_HUGE_8MB = 0x5c000000
+ MFD_HUGE_MASK = 0x3f
+ MFD_HUGE_SHIFT = 0x1a
MINIX2_SUPER_MAGIC = 0x2468
MINIX2_SUPER_MAGIC2 = 0x2478
MINIX3_SUPER_MAGIC = 0x4d5a
@@ -1787,6 +1803,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
+ SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1e
SO_ATTACH_BPF = 0x32
@@ -2269,6 +2286,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
+ XDP_COPY = 0x2
+ XDP_FLAGS_DRV_MODE = 0x4
+ XDP_FLAGS_HW_MODE = 0x8
+ XDP_FLAGS_MASK = 0xf
+ XDP_FLAGS_MODES = 0xe
+ XDP_FLAGS_SKB_MODE = 0x2
+ XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
+ XDP_MMAP_OFFSETS = 0x1
+ XDP_PGOFF_RX_RING = 0x0
+ XDP_PGOFF_TX_RING = 0x80000000
+ XDP_RX_RING = 0x2
+ XDP_SHARED_UMEM = 0x1
+ XDP_STATISTICS = 0x7
+ XDP_TX_RING = 0x3
+ XDP_UMEM_COMPLETION_RING = 0x6
+ XDP_UMEM_FILL_RING = 0x5
+ XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
+ XDP_UMEM_PGOFF_FILL_RING = 0x100000000
+ XDP_UMEM_REG = 0x4
+ XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0x1800
ZSMALLOC_MAGIC = 0x58295829
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index 9fee271de..b515b2a63 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
+ AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@@ -975,6 +976,21 @@ const (
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
+ MFD_ALLOW_SEALING = 0x2
+ MFD_CLOEXEC = 0x1
+ MFD_HUGETLB = 0x4
+ MFD_HUGE_16GB = -0x78000000
+ MFD_HUGE_16MB = 0x60000000
+ MFD_HUGE_1GB = 0x78000000
+ MFD_HUGE_1MB = 0x50000000
+ MFD_HUGE_256MB = 0x70000000
+ MFD_HUGE_2GB = 0x7c000000
+ MFD_HUGE_2MB = 0x54000000
+ MFD_HUGE_512KB = 0x4c000000
+ MFD_HUGE_64KB = 0x40000000
+ MFD_HUGE_8MB = 0x5c000000
+ MFD_HUGE_MASK = 0x3f
+ MFD_HUGE_SHIFT = 0x1a
MINIX2_SUPER_MAGIC = 0x2468
MINIX2_SUPER_MAGIC2 = 0x2478
MINIX3_SUPER_MAGIC = 0x4d5a
@@ -1796,6 +1812,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
+ SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1009
SO_ATTACH_BPF = 0x32
@@ -2280,6 +2297,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
+ XDP_COPY = 0x2
+ XDP_FLAGS_DRV_MODE = 0x4
+ XDP_FLAGS_HW_MODE = 0x8
+ XDP_FLAGS_MASK = 0xf
+ XDP_FLAGS_MODES = 0xe
+ XDP_FLAGS_SKB_MODE = 0x2
+ XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
+ XDP_MMAP_OFFSETS = 0x1
+ XDP_PGOFF_RX_RING = 0x0
+ XDP_PGOFF_TX_RING = 0x80000000
+ XDP_RX_RING = 0x2
+ XDP_SHARED_UMEM = 0x1
+ XDP_STATISTICS = 0x7
+ XDP_TX_RING = 0x3
+ XDP_UMEM_COMPLETION_RING = 0x6
+ XDP_UMEM_FILL_RING = 0x5
+ XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
+ XDP_UMEM_PGOFF_FILL_RING = 0x100000000
+ XDP_UMEM_REG = 0x4
+ XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0x1800
ZSMALLOC_MAGIC = 0x58295829
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index d022427be..29a88f0e5 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
+ AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@@ -975,6 +976,21 @@ const (
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
+ MFD_ALLOW_SEALING = 0x2
+ MFD_CLOEXEC = 0x1
+ MFD_HUGETLB = 0x4
+ MFD_HUGE_16GB = -0x78000000
+ MFD_HUGE_16MB = 0x60000000
+ MFD_HUGE_1GB = 0x78000000
+ MFD_HUGE_1MB = 0x50000000
+ MFD_HUGE_256MB = 0x70000000
+ MFD_HUGE_2GB = 0x7c000000
+ MFD_HUGE_2MB = 0x54000000
+ MFD_HUGE_512KB = 0x4c000000
+ MFD_HUGE_64KB = 0x40000000
+ MFD_HUGE_8MB = 0x5c000000
+ MFD_HUGE_MASK = 0x3f
+ MFD_HUGE_SHIFT = 0x1a
MINIX2_SUPER_MAGIC = 0x2468
MINIX2_SUPER_MAGIC2 = 0x2478
MINIX3_SUPER_MAGIC = 0x4d5a
@@ -1796,6 +1812,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
+ SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1009
SO_ATTACH_BPF = 0x32
@@ -2280,6 +2297,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
+ XDP_COPY = 0x2
+ XDP_FLAGS_DRV_MODE = 0x4
+ XDP_FLAGS_HW_MODE = 0x8
+ XDP_FLAGS_MASK = 0xf
+ XDP_FLAGS_MODES = 0xe
+ XDP_FLAGS_SKB_MODE = 0x2
+ XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
+ XDP_MMAP_OFFSETS = 0x1
+ XDP_PGOFF_RX_RING = 0x0
+ XDP_PGOFF_TX_RING = 0x80000000
+ XDP_RX_RING = 0x2
+ XDP_SHARED_UMEM = 0x1
+ XDP_STATISTICS = 0x7
+ XDP_TX_RING = 0x3
+ XDP_UMEM_COMPLETION_RING = 0x6
+ XDP_UMEM_FILL_RING = 0x5
+ XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
+ XDP_UMEM_PGOFF_FILL_RING = 0x100000000
+ XDP_UMEM_REG = 0x4
+ XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0x1800
ZSMALLOC_MAGIC = 0x58295829
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index 509faae72..0767ac1b0 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
+ AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@@ -975,6 +976,21 @@ const (
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
+ MFD_ALLOW_SEALING = 0x2
+ MFD_CLOEXEC = 0x1
+ MFD_HUGETLB = 0x4
+ MFD_HUGE_16GB = -0x78000000
+ MFD_HUGE_16MB = 0x60000000
+ MFD_HUGE_1GB = 0x78000000
+ MFD_HUGE_1MB = 0x50000000
+ MFD_HUGE_256MB = 0x70000000
+ MFD_HUGE_2GB = 0x7c000000
+ MFD_HUGE_2MB = 0x54000000
+ MFD_HUGE_512KB = 0x4c000000
+ MFD_HUGE_64KB = 0x40000000
+ MFD_HUGE_8MB = 0x5c000000
+ MFD_HUGE_MASK = 0x3f
+ MFD_HUGE_SHIFT = 0x1a
MINIX2_SUPER_MAGIC = 0x2468
MINIX2_SUPER_MAGIC2 = 0x2478
MINIX3_SUPER_MAGIC = 0x4d5a
@@ -1796,6 +1812,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
+ SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1009
SO_ATTACH_BPF = 0x32
@@ -2280,6 +2297,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
+ XDP_COPY = 0x2
+ XDP_FLAGS_DRV_MODE = 0x4
+ XDP_FLAGS_HW_MODE = 0x8
+ XDP_FLAGS_MASK = 0xf
+ XDP_FLAGS_MODES = 0xe
+ XDP_FLAGS_SKB_MODE = 0x2
+ XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
+ XDP_MMAP_OFFSETS = 0x1
+ XDP_PGOFF_RX_RING = 0x0
+ XDP_PGOFF_TX_RING = 0x80000000
+ XDP_RX_RING = 0x2
+ XDP_SHARED_UMEM = 0x1
+ XDP_STATISTICS = 0x7
+ XDP_TX_RING = 0x3
+ XDP_UMEM_COMPLETION_RING = 0x6
+ XDP_UMEM_FILL_RING = 0x5
+ XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
+ XDP_UMEM_PGOFF_FILL_RING = 0x100000000
+ XDP_UMEM_REG = 0x4
+ XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0x1800
ZSMALLOC_MAGIC = 0x58295829
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 340f78afc..269b81318 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
+ AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@@ -975,6 +976,21 @@ const (
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
+ MFD_ALLOW_SEALING = 0x2
+ MFD_CLOEXEC = 0x1
+ MFD_HUGETLB = 0x4
+ MFD_HUGE_16GB = -0x78000000
+ MFD_HUGE_16MB = 0x60000000
+ MFD_HUGE_1GB = 0x78000000
+ MFD_HUGE_1MB = 0x50000000
+ MFD_HUGE_256MB = 0x70000000
+ MFD_HUGE_2GB = 0x7c000000
+ MFD_HUGE_2MB = 0x54000000
+ MFD_HUGE_512KB = 0x4c000000
+ MFD_HUGE_64KB = 0x40000000
+ MFD_HUGE_8MB = 0x5c000000
+ MFD_HUGE_MASK = 0x3f
+ MFD_HUGE_SHIFT = 0x1a
MINIX2_SUPER_MAGIC = 0x2468
MINIX2_SUPER_MAGIC2 = 0x2478
MINIX3_SUPER_MAGIC = 0x4d5a
@@ -1796,6 +1812,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
+ SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1009
SO_ATTACH_BPF = 0x32
@@ -2280,6 +2297,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
+ XDP_COPY = 0x2
+ XDP_FLAGS_DRV_MODE = 0x4
+ XDP_FLAGS_HW_MODE = 0x8
+ XDP_FLAGS_MASK = 0xf
+ XDP_FLAGS_MODES = 0xe
+ XDP_FLAGS_SKB_MODE = 0x2
+ XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
+ XDP_MMAP_OFFSETS = 0x1
+ XDP_PGOFF_RX_RING = 0x0
+ XDP_PGOFF_TX_RING = 0x80000000
+ XDP_RX_RING = 0x2
+ XDP_SHARED_UMEM = 0x1
+ XDP_STATISTICS = 0x7
+ XDP_TX_RING = 0x3
+ XDP_UMEM_COMPLETION_RING = 0x6
+ XDP_UMEM_FILL_RING = 0x5
+ XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
+ XDP_UMEM_PGOFF_FILL_RING = 0x100000000
+ XDP_UMEM_REG = 0x4
+ XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0x1800
ZSMALLOC_MAGIC = 0x58295829
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index 4e80d6aaf..eb52e9689 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
+ AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@@ -974,6 +975,21 @@ const (
MCL_CURRENT = 0x2000
MCL_FUTURE = 0x4000
MCL_ONFAULT = 0x8000
+ MFD_ALLOW_SEALING = 0x2
+ MFD_CLOEXEC = 0x1
+ MFD_HUGETLB = 0x4
+ MFD_HUGE_16GB = -0x78000000
+ MFD_HUGE_16MB = 0x60000000
+ MFD_HUGE_1GB = 0x78000000
+ MFD_HUGE_1MB = 0x50000000
+ MFD_HUGE_256MB = 0x70000000
+ MFD_HUGE_2GB = 0x7c000000
+ MFD_HUGE_2MB = 0x54000000
+ MFD_HUGE_512KB = 0x4c000000
+ MFD_HUGE_64KB = 0x40000000
+ MFD_HUGE_8MB = 0x5c000000
+ MFD_HUGE_MASK = 0x3f
+ MFD_HUGE_SHIFT = 0x1a
MINIX2_SUPER_MAGIC = 0x2468
MINIX2_SUPER_MAGIC2 = 0x2478
MINIX3_SUPER_MAGIC = 0x4d5a
@@ -1852,6 +1868,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
+ SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1e
SO_ATTACH_BPF = 0x32
@@ -2337,6 +2354,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4000
+ XDP_COPY = 0x2
+ XDP_FLAGS_DRV_MODE = 0x4
+ XDP_FLAGS_HW_MODE = 0x8
+ XDP_FLAGS_MASK = 0xf
+ XDP_FLAGS_MODES = 0xe
+ XDP_FLAGS_SKB_MODE = 0x2
+ XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
+ XDP_MMAP_OFFSETS = 0x1
+ XDP_PGOFF_RX_RING = 0x0
+ XDP_PGOFF_TX_RING = 0x80000000
+ XDP_RX_RING = 0x2
+ XDP_SHARED_UMEM = 0x1
+ XDP_STATISTICS = 0x7
+ XDP_TX_RING = 0x3
+ XDP_UMEM_COMPLETION_RING = 0x6
+ XDP_UMEM_FILL_RING = 0x5
+ XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
+ XDP_UMEM_PGOFF_FILL_RING = 0x100000000
+ XDP_UMEM_REG = 0x4
+ XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0xc00
ZSMALLOC_MAGIC = 0x58295829
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index f22c0d68c..0563d34b3 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
+ AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@@ -974,6 +975,21 @@ const (
MCL_CURRENT = 0x2000
MCL_FUTURE = 0x4000
MCL_ONFAULT = 0x8000
+ MFD_ALLOW_SEALING = 0x2
+ MFD_CLOEXEC = 0x1
+ MFD_HUGETLB = 0x4
+ MFD_HUGE_16GB = -0x78000000
+ MFD_HUGE_16MB = 0x60000000
+ MFD_HUGE_1GB = 0x78000000
+ MFD_HUGE_1MB = 0x50000000
+ MFD_HUGE_256MB = 0x70000000
+ MFD_HUGE_2GB = 0x7c000000
+ MFD_HUGE_2MB = 0x54000000
+ MFD_HUGE_512KB = 0x4c000000
+ MFD_HUGE_64KB = 0x40000000
+ MFD_HUGE_8MB = 0x5c000000
+ MFD_HUGE_MASK = 0x3f
+ MFD_HUGE_SHIFT = 0x1a
MINIX2_SUPER_MAGIC = 0x2468
MINIX2_SUPER_MAGIC2 = 0x2478
MINIX3_SUPER_MAGIC = 0x4d5a
@@ -1852,6 +1868,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
+ SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1e
SO_ATTACH_BPF = 0x32
@@ -2337,6 +2354,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4000
+ XDP_COPY = 0x2
+ XDP_FLAGS_DRV_MODE = 0x4
+ XDP_FLAGS_HW_MODE = 0x8
+ XDP_FLAGS_MASK = 0xf
+ XDP_FLAGS_MODES = 0xe
+ XDP_FLAGS_SKB_MODE = 0x2
+ XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
+ XDP_MMAP_OFFSETS = 0x1
+ XDP_PGOFF_RX_RING = 0x0
+ XDP_PGOFF_TX_RING = 0x80000000
+ XDP_RX_RING = 0x2
+ XDP_SHARED_UMEM = 0x1
+ XDP_STATISTICS = 0x7
+ XDP_TX_RING = 0x3
+ XDP_UMEM_COMPLETION_RING = 0x6
+ XDP_UMEM_FILL_RING = 0x5
+ XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
+ XDP_UMEM_PGOFF_FILL_RING = 0x100000000
+ XDP_UMEM_REG = 0x4
+ XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0xc00
ZSMALLOC_MAGIC = 0x58295829
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index 0978dba1e..e95e3f677 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
+ AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@@ -975,6 +976,21 @@ const (
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
+ MFD_ALLOW_SEALING = 0x2
+ MFD_CLOEXEC = 0x1
+ MFD_HUGETLB = 0x4
+ MFD_HUGE_16GB = -0x78000000
+ MFD_HUGE_16MB = 0x60000000
+ MFD_HUGE_1GB = 0x78000000
+ MFD_HUGE_1MB = 0x50000000
+ MFD_HUGE_256MB = 0x70000000
+ MFD_HUGE_2GB = 0x7c000000
+ MFD_HUGE_2MB = 0x54000000
+ MFD_HUGE_512KB = 0x4c000000
+ MFD_HUGE_64KB = 0x40000000
+ MFD_HUGE_8MB = 0x5c000000
+ MFD_HUGE_MASK = 0x3f
+ MFD_HUGE_SHIFT = 0x1a
MINIX2_SUPER_MAGIC = 0x2468
MINIX2_SUPER_MAGIC2 = 0x2478
MINIX3_SUPER_MAGIC = 0x4d5a
@@ -1784,6 +1800,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
+ SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1e
SO_ATTACH_BPF = 0x32
@@ -2265,6 +2282,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
+ XDP_COPY = 0x2
+ XDP_FLAGS_DRV_MODE = 0x4
+ XDP_FLAGS_HW_MODE = 0x8
+ XDP_FLAGS_MASK = 0xf
+ XDP_FLAGS_MODES = 0xe
+ XDP_FLAGS_SKB_MODE = 0x2
+ XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
+ XDP_MMAP_OFFSETS = 0x1
+ XDP_PGOFF_RX_RING = 0x0
+ XDP_PGOFF_TX_RING = 0x80000000
+ XDP_RX_RING = 0x2
+ XDP_SHARED_UMEM = 0x1
+ XDP_STATISTICS = 0x7
+ XDP_TX_RING = 0x3
+ XDP_UMEM_COMPLETION_RING = 0x6
+ XDP_UMEM_FILL_RING = 0x5
+ XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
+ XDP_UMEM_PGOFF_FILL_RING = 0x100000000
+ XDP_UMEM_REG = 0x4
+ XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0x1800
ZSMALLOC_MAGIC = 0x58295829
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 5a057da42..bad17418e 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -64,6 +64,7 @@ const (
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
+ AF_XDP = 0x2c
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
@@ -975,6 +976,21 @@ const (
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
+ MFD_ALLOW_SEALING = 0x2
+ MFD_CLOEXEC = 0x1
+ MFD_HUGETLB = 0x4
+ MFD_HUGE_16GB = -0x78000000
+ MFD_HUGE_16MB = 0x60000000
+ MFD_HUGE_1GB = 0x78000000
+ MFD_HUGE_1MB = 0x50000000
+ MFD_HUGE_256MB = 0x70000000
+ MFD_HUGE_2GB = 0x7c000000
+ MFD_HUGE_2MB = 0x54000000
+ MFD_HUGE_512KB = 0x4c000000
+ MFD_HUGE_64KB = 0x40000000
+ MFD_HUGE_8MB = 0x5c000000
+ MFD_HUGE_MASK = 0x3f
+ MFD_HUGE_SHIFT = 0x1a
MINIX2_SUPER_MAGIC = 0x2468
MINIX2_SUPER_MAGIC2 = 0x2478
MINIX3_SUPER_MAGIC = 0x4d5a
@@ -1857,6 +1873,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_X25 = 0x106
+ SOL_XDP = 0x11b
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1e
SO_ATTACH_BPF = 0x32
@@ -2338,6 +2355,26 @@ const (
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
+ XDP_COPY = 0x2
+ XDP_FLAGS_DRV_MODE = 0x4
+ XDP_FLAGS_HW_MODE = 0x8
+ XDP_FLAGS_MASK = 0xf
+ XDP_FLAGS_MODES = 0xe
+ XDP_FLAGS_SKB_MODE = 0x2
+ XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
+ XDP_MMAP_OFFSETS = 0x1
+ XDP_PGOFF_RX_RING = 0x0
+ XDP_PGOFF_TX_RING = 0x80000000
+ XDP_RX_RING = 0x2
+ XDP_SHARED_UMEM = 0x1
+ XDP_STATISTICS = 0x7
+ XDP_TX_RING = 0x3
+ XDP_UMEM_COMPLETION_RING = 0x6
+ XDP_UMEM_FILL_RING = 0x5
+ XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
+ XDP_UMEM_PGOFF_FILL_RING = 0x100000000
+ XDP_UMEM_REG = 0x4
+ XDP_ZEROCOPY = 0x4
XENFS_SUPER_MAGIC = 0xabba1974
XTABS = 0x1800
ZSMALLOC_MAGIC = 0x58295829
diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go
index 19316b1d0..11472b759 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go
@@ -1020,6 +1020,43 @@ const (
MAP_WIRED = 0x800
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
+ MNT_ASYNC = 0x40
+ MNT_BASIC_FLAGS = 0xe782807f
+ MNT_DEFEXPORTED = 0x200
+ MNT_DISCARD = 0x800000
+ MNT_EXKERB = 0x800
+ MNT_EXNORESPORT = 0x8000000
+ MNT_EXPORTANON = 0x400
+ MNT_EXPORTED = 0x100
+ MNT_EXPUBLIC = 0x10000000
+ MNT_EXRDONLY = 0x80
+ MNT_EXTATTR = 0x1000000
+ MNT_FORCE = 0x80000
+ MNT_GETARGS = 0x400000
+ MNT_IGNORE = 0x100000
+ MNT_LAZY = 0x3
+ MNT_LOCAL = 0x1000
+ MNT_LOG = 0x2000000
+ MNT_NOATIME = 0x4000000
+ MNT_NOCOREDUMP = 0x8000
+ MNT_NODEV = 0x10
+ MNT_NODEVMTIME = 0x40000000
+ MNT_NOEXEC = 0x4
+ MNT_NOSUID = 0x8
+ MNT_NOWAIT = 0x2
+ MNT_OP_FLAGS = 0x4d0000
+ MNT_QUOTA = 0x2000
+ MNT_RDONLY = 0x1
+ MNT_RELATIME = 0x20000
+ MNT_RELOAD = 0x40000
+ MNT_ROOTFS = 0x4000
+ MNT_SOFTDEP = 0x80000000
+ MNT_SYMPERM = 0x20000000
+ MNT_SYNCHRONOUS = 0x2
+ MNT_UNION = 0x20
+ MNT_UPDATE = 0x10000
+ MNT_VISFLAGMASK = 0xff90ffff
+ MNT_WAIT = 0x1
MSG_BCAST = 0x100
MSG_CMSG_CLOEXEC = 0x800
MSG_CONTROLMBUF = 0x2000000
diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
index f2cf500f5..b207e1cf3 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
@@ -1010,6 +1010,43 @@ const (
MAP_WIRED = 0x800
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
+ MNT_ASYNC = 0x40
+ MNT_BASIC_FLAGS = 0xe782807f
+ MNT_DEFEXPORTED = 0x200
+ MNT_DISCARD = 0x800000
+ MNT_EXKERB = 0x800
+ MNT_EXNORESPORT = 0x8000000
+ MNT_EXPORTANON = 0x400
+ MNT_EXPORTED = 0x100
+ MNT_EXPUBLIC = 0x10000000
+ MNT_EXRDONLY = 0x80
+ MNT_EXTATTR = 0x1000000
+ MNT_FORCE = 0x80000
+ MNT_GETARGS = 0x400000
+ MNT_IGNORE = 0x100000
+ MNT_LAZY = 0x3
+ MNT_LOCAL = 0x1000
+ MNT_LOG = 0x2000000
+ MNT_NOATIME = 0x4000000
+ MNT_NOCOREDUMP = 0x8000
+ MNT_NODEV = 0x10
+ MNT_NODEVMTIME = 0x40000000
+ MNT_NOEXEC = 0x4
+ MNT_NOSUID = 0x8
+ MNT_NOWAIT = 0x2
+ MNT_OP_FLAGS = 0x4d0000
+ MNT_QUOTA = 0x2000
+ MNT_RDONLY = 0x1
+ MNT_RELATIME = 0x20000
+ MNT_RELOAD = 0x40000
+ MNT_ROOTFS = 0x4000
+ MNT_SOFTDEP = 0x80000000
+ MNT_SYMPERM = 0x20000000
+ MNT_SYNCHRONOUS = 0x2
+ MNT_UNION = 0x20
+ MNT_UPDATE = 0x10000
+ MNT_VISFLAGMASK = 0xff90ffff
+ MNT_WAIT = 0x1
MSG_BCAST = 0x100
MSG_CMSG_CLOEXEC = 0x800
MSG_CONTROLMBUF = 0x2000000
diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go
index 858e29998..fb3ff9bbd 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go
@@ -1000,6 +1000,43 @@ const (
MAP_STACK = 0x2000
MAP_TRYFIXED = 0x400
MAP_WIRED = 0x800
+ MNT_ASYNC = 0x40
+ MNT_BASIC_FLAGS = 0xe782807f
+ MNT_DEFEXPORTED = 0x200
+ MNT_DISCARD = 0x800000
+ MNT_EXKERB = 0x800
+ MNT_EXNORESPORT = 0x8000000
+ MNT_EXPORTANON = 0x400
+ MNT_EXPORTED = 0x100
+ MNT_EXPUBLIC = 0x10000000
+ MNT_EXRDONLY = 0x80
+ MNT_EXTATTR = 0x1000000
+ MNT_FORCE = 0x80000
+ MNT_GETARGS = 0x400000
+ MNT_IGNORE = 0x100000
+ MNT_LAZY = 0x3
+ MNT_LOCAL = 0x1000
+ MNT_LOG = 0x2000000
+ MNT_NOATIME = 0x4000000
+ MNT_NOCOREDUMP = 0x8000
+ MNT_NODEV = 0x10
+ MNT_NODEVMTIME = 0x40000000
+ MNT_NOEXEC = 0x4
+ MNT_NOSUID = 0x8
+ MNT_NOWAIT = 0x2
+ MNT_OP_FLAGS = 0x4d0000
+ MNT_QUOTA = 0x2000
+ MNT_RDONLY = 0x1
+ MNT_RELATIME = 0x20000
+ MNT_RELOAD = 0x40000
+ MNT_ROOTFS = 0x4000
+ MNT_SOFTDEP = 0x80000000
+ MNT_SYMPERM = 0x20000000
+ MNT_SYNCHRONOUS = 0x2
+ MNT_UNION = 0x20
+ MNT_UPDATE = 0x10000
+ MNT_VISFLAGMASK = 0xff90ffff
+ MNT_WAIT = 0x1
MSG_BCAST = 0x100
MSG_CMSG_CLOEXEC = 0x800
MSG_CONTROLMBUF = 0x2000000
diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
index 7d92f2c53..d8be04518 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
@@ -899,6 +899,32 @@ const (
MAP_TRYFIXED = 0x400
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
+ MNT_ASYNC = 0x40
+ MNT_DEFEXPORTED = 0x200
+ MNT_DELEXPORT = 0x20000
+ MNT_DOOMED = 0x8000000
+ MNT_EXPORTANON = 0x400
+ MNT_EXPORTED = 0x100
+ MNT_EXRDONLY = 0x80
+ MNT_FORCE = 0x80000
+ MNT_LAZY = 0x3
+ MNT_LOCAL = 0x1000
+ MNT_NOATIME = 0x8000
+ MNT_NODEV = 0x10
+ MNT_NOEXEC = 0x4
+ MNT_NOSUID = 0x8
+ MNT_NOWAIT = 0x2
+ MNT_QUOTA = 0x2000
+ MNT_RDONLY = 0x1
+ MNT_RELOAD = 0x40000
+ MNT_ROOTFS = 0x4000
+ MNT_SOFTDEP = 0x4000000
+ MNT_SYNCHRONOUS = 0x2
+ MNT_UPDATE = 0x10000
+ MNT_VISFLAGMASK = 0x400ffff
+ MNT_WAIT = 0x1
+ MNT_WANTRDWR = 0x2000000
+ MNT_WXALLOWED = 0x800
MSG_BCAST = 0x100
MSG_CTRUNC = 0x20
MSG_DONTROUTE = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
index b0a7ebafc..12261a562 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
@@ -939,6 +939,32 @@ const (
MAP_TRYFIXED = 0x0
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
+ MNT_ASYNC = 0x40
+ MNT_DEFEXPORTED = 0x200
+ MNT_DELEXPORT = 0x20000
+ MNT_DOOMED = 0x8000000
+ MNT_EXPORTANON = 0x400
+ MNT_EXPORTED = 0x100
+ MNT_EXRDONLY = 0x80
+ MNT_FORCE = 0x80000
+ MNT_LAZY = 0x3
+ MNT_LOCAL = 0x1000
+ MNT_NOATIME = 0x8000
+ MNT_NODEV = 0x10
+ MNT_NOEXEC = 0x4
+ MNT_NOSUID = 0x8
+ MNT_NOWAIT = 0x2
+ MNT_QUOTA = 0x2000
+ MNT_RDONLY = 0x1
+ MNT_RELOAD = 0x40000
+ MNT_ROOTFS = 0x4000
+ MNT_SOFTDEP = 0x4000000
+ MNT_SYNCHRONOUS = 0x2
+ MNT_UPDATE = 0x10000
+ MNT_VISFLAGMASK = 0x400ffff
+ MNT_WAIT = 0x1
+ MNT_WANTRDWR = 0x2000000
+ MNT_WXALLOWED = 0x800
MSG_BCAST = 0x100
MSG_CMSG_CLOEXEC = 0x800
MSG_CTRUNC = 0x20
diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
index 50c1d9f35..79d5695c3 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
@@ -899,6 +899,32 @@ const (
MAP_TRYFIXED = 0x0
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
+ MNT_ASYNC = 0x40
+ MNT_DEFEXPORTED = 0x200
+ MNT_DELEXPORT = 0x20000
+ MNT_DOOMED = 0x8000000
+ MNT_EXPORTANON = 0x400
+ MNT_EXPORTED = 0x100
+ MNT_EXRDONLY = 0x80
+ MNT_FORCE = 0x80000
+ MNT_LAZY = 0x3
+ MNT_LOCAL = 0x1000
+ MNT_NOATIME = 0x8000
+ MNT_NODEV = 0x10
+ MNT_NOEXEC = 0x4
+ MNT_NOSUID = 0x8
+ MNT_NOWAIT = 0x2
+ MNT_QUOTA = 0x2000
+ MNT_RDONLY = 0x1
+ MNT_RELOAD = 0x40000
+ MNT_ROOTFS = 0x4000
+ MNT_SOFTDEP = 0x4000000
+ MNT_SYNCHRONOUS = 0x2
+ MNT_UPDATE = 0x10000
+ MNT_VISFLAGMASK = 0x400ffff
+ MNT_WAIT = 0x1
+ MNT_WANTRDWR = 0x2000000
+ MNT_WXALLOWED = 0x800
MSG_BCAST = 0x100
MSG_CMSG_CLOEXEC = 0x800
MSG_CTRUNC = 0x20
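
The MNT_* values added for DragonFly, NetBSD and OpenBSD above are mount flags as reported by statfs(2). A hedged sketch written against OpenBSD's Statfs_t (which exposes F_flags); field names differ per platform, so treat it as illustrative only:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var st unix.Statfs_t
	if err := unix.Statfs("/", &st); err != nil {
		fmt.Println("statfs failed:", err)
		return
	}
	if st.F_flags&unix.MNT_RDONLY != 0 {
		fmt.Println("/ is mounted read-only")
	} else {
		fmt.Println("/ is mounted read-write")
	}
}
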
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go
index 46d0652fb..ab2f76122 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go
@@ -39,8 +39,10 @@ int getrusage(int, uintptr_t);
int getsid(int);
int kill(int, int);
int syslog(int, uintptr_t, size_t);
+int mkdir(int, uintptr_t, unsigned int);
int mkdirat(int, uintptr_t, unsigned int);
int mkfifo(uintptr_t, unsigned int);
+int mknod(uintptr_t, unsigned int, int);
int mknodat(int, uintptr_t, unsigned int, int);
int nanosleep(uintptr_t, uintptr_t);
int open64(uintptr_t, int, unsigned int);
@@ -502,6 +504,17 @@ func Klogctl(typ int, buf []byte) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Mkdir(dirfd int, path string, mode uint32) (err error) {
+ _p0 := uintptr(unsafe.Pointer(C.CString(path)))
+ r0, er := C.mkdir(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode))
+ if r0 == -1 && er != nil {
+ err = er
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
_p0 := uintptr(unsafe.Pointer(C.CString(path)))
r0, er := C.mkdirat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode))
@@ -524,6 +537,17 @@ func Mkfifo(path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Mknod(path string, mode uint32, dev int) (err error) {
+ _p0 := uintptr(unsafe.Pointer(C.CString(path)))
+ r0, er := C.mknod(C.uintptr_t(_p0), C.uint(mode), C.int(dev))
+ if r0 == -1 && er != nil {
+ err = er
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
_p0 := uintptr(unsafe.Pointer(C.CString(path)))
r0, er := C.mknodat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode), C.int(dev))
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go
index b33f9eccf..2e4f93fb1 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go
@@ -39,8 +39,10 @@ int getrusage(int, uintptr_t);
int getsid(int);
int kill(int, int);
int syslog(int, uintptr_t, size_t);
+int mkdir(int, uintptr_t, unsigned int);
int mkdirat(int, uintptr_t, unsigned int);
int mkfifo(uintptr_t, unsigned int);
+int mknod(uintptr_t, unsigned int, int);
int mknodat(int, uintptr_t, unsigned int, int);
int nanosleep(uintptr_t, uintptr_t);
int open64(uintptr_t, int, unsigned int);
@@ -502,6 +504,17 @@ func Klogctl(typ int, buf []byte) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Mkdir(dirfd int, path string, mode uint32) (err error) {
+ _p0 := uintptr(unsafe.Pointer(C.CString(path)))
+ r0, er := C.mkdir(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode))
+ if r0 == -1 && er != nil {
+ err = er
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
_p0 := uintptr(unsafe.Pointer(C.CString(path)))
r0, er := C.mkdirat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode))
@@ -524,6 +537,17 @@ func Mkfifo(path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Mknod(path string, mode uint32, dev int) (err error) {
+ _p0 := uintptr(unsafe.Pointer(C.CString(path)))
+ r0, er := C.mknod(C.uintptr_t(_p0), C.uint(mode), C.int(dev))
+ if r0 == -1 && er != nil {
+ err = er
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
_p0 := uintptr(unsafe.Pointer(C.CString(path)))
r0, er := C.mknodat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode), C.int(dev))
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
index 8e8d427d6..a1c7d785f 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
@@ -979,6 +979,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func MemfdCreate(name string, flags int) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
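
MemfdCreate above is the generated wrapper for memfd_create(2), matching the //sys line added in syscall_linux.go and the MFD_* constants in the zerrors files. A hedged, Linux-only sketch; the name "demo" is just a debugging label under /proc/<pid>/fd, not a filesystem path:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.MemfdCreate("demo", unix.MFD_CLOEXEC|unix.MFD_ALLOW_SEALING)
	if err != nil {
		fmt.Println("memfd_create failed:", err) // ENOSYS before Linux 3.17
		return
	}
	f := os.NewFile(uintptr(fd), "memfd:demo")
	defer f.Close()

	if _, err := f.WriteString("hello from an anonymous, fd-backed file\n"); err != nil {
		fmt.Println("write failed:", err)
		return
	}
	fi, err := f.Stat()
	if err != nil {
		fmt.Println("stat failed:", err)
		return
	}
	fmt.Println("memfd size:", fi.Size())
}
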
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
index 2f60780ca..47503919a 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
@@ -979,6 +979,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func MemfdCreate(name string, flags int) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
index d29a11c94..389c42af3 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
@@ -979,6 +979,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func MemfdCreate(name string, flags int) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
index d03eb2968..97f6a2c56 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
@@ -979,6 +979,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func MemfdCreate(name string, flags int) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
index dea09328b..889eaf1a1 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
@@ -979,6 +979,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func MemfdCreate(name string, flags int) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
index 31bbcff44..06016870e 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
@@ -979,6 +979,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func MemfdCreate(name string, flags int) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
index e025a33aa..76dc324b8 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
@@ -979,6 +979,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func MemfdCreate(name string, flags int) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
index 57d7d931d..a8428e923 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
@@ -979,6 +979,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func MemfdCreate(name string, flags int) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
index f3fae1d14..1cb8eb4e1 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
@@ -979,6 +979,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func MemfdCreate(name string, flags int) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
index 011b0a53b..5f0cb9d82 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
@@ -979,6 +979,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func MemfdCreate(name string, flags int) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
index b086f7ed7..6977f045d 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
@@ -979,6 +979,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func MemfdCreate(name string, flags int) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
index c637da95f..2bfd3d0c4 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
@@ -979,6 +979,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func MemfdCreate(name string, flags int) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
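
The hunks above, repeated for each Linux GOARCH, add a generated wrapper for the memfd_create(2) system call. As an illustrative sketch only (not part of this change), the new unix.MemfdCreate wrapper can be used on Linux roughly like this; the name "scratch" and the zero flags value are arbitrary choices for the example:

    // Create an anonymous, memory-backed file descriptor and wrap it in *os.File.
    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Passing 0 keeps the sketch independent of the MFD_* constants; real
        // code would normally pass MFD_CLOEXEC if that constant is available
        // in this revision of x/sys/unix.
        fd, err := unix.MemfdCreate("scratch", 0)
        if err != nil {
            panic(err)
        }
        f := os.NewFile(uintptr(fd), "memfd:scratch")
        defer f.Close()

        if _, err := f.WriteString("hello from memfd_create"); err != nil {
            panic(err)
        }
        fmt.Println("wrote to", f.Name())
    }
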
diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go
index ee455365f..f1cfe7db1 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go
@@ -268,9 +268,22 @@ type Termios struct {
Cc [16]uint8
}
-type Termio struct{}
+type Termio struct {
+ Iflag uint16
+ Oflag uint16
+ Cflag uint16
+ Lflag uint16
+ Line uint8
+ Cc [8]uint8
+ _ [1]byte
+}
-type Winsize struct{}
+type Winsize struct {
+ Row uint16
+ Col uint16
+ Xpixel uint16
+ Ypixel uint16
+}
type PollFd struct {
Fd int32
@@ -301,6 +314,32 @@ type Flock_t struct {
Len int64
}
-type Statfs_t struct{}
+type Fsid_t struct {
+ Val [2]uint32
+}
+type Fsid64_t struct {
+ Val [2]uint64
+}
+
+type Statfs_t struct {
+ Version int32
+ Type int32
+ Bsize uint32
+ Blocks uint32
+ Bfree uint32
+ Bavail uint32
+ Files uint32
+ Ffree uint32
+ Fsid Fsid_t
+ Vfstype int32
+ Fsize uint32
+ Vfsnumber int32
+ Vfsoff int32
+ Vfslen int32
+ Vfsvers int32
+ Fname [32]uint8
+ Fpack [32]uint8
+ Name_max int32
+}
const RNDGETENTCNT = 0x80045200
diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go
index cdb1699d5..95581a3bc 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go
@@ -275,9 +275,22 @@ type Termios struct {
Cc [16]uint8
}
-type Termio struct{}
+type Termio struct {
+ Iflag uint16
+ Oflag uint16
+ Cflag uint16
+ Lflag uint16
+ Line uint8
+ Cc [8]uint8
+ _ [1]byte
+}
-type Winsize struct{}
+type Winsize struct {
+ Row uint16
+ Col uint16
+ Xpixel uint16
+ Ypixel uint16
+}
type PollFd struct {
Fd int32
@@ -308,6 +321,34 @@ type Flock_t struct {
Len int64
}
-type Statfs_t struct{}
+type Fsid_t struct {
+ Val [2]uint32
+}
+type Fsid64_t struct {
+ Val [2]uint64
+}
+
+type Statfs_t struct {
+ Version int32
+ Type int32
+ Bsize uint64
+ Blocks uint64
+ Bfree uint64
+ Bavail uint64
+ Files uint64
+ Ffree uint64
+ Fsid Fsid64_t
+ Vfstype int32
+ _ [4]byte
+ Fsize uint64
+ Vfsnumber int32
+ Vfsoff int32
+ Vfslen int32
+ Vfsvers int32
+ Fname [32]uint8
+ Fpack [32]uint8
+ Name_max int32
+ _ [4]byte
+}
const RNDGETENTCNT = 0x80045200
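
The two ztypes_aix hunks above replace the previously empty Termio, Winsize and Statfs_t placeholders with full field layouts (32-bit widths on ppc, 64-bit widths plus explicit padding on ppc64). A minimal sketch, compilable only with GOOS=aix, that prints the generated sizes so they can be checked by hand against the C definitions in <termios.h> and <sys/statfs.h>:

    package main

    import (
        "fmt"
        "unsafe"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Sizes are printed rather than asserted; compare them against the
        // corresponding C structs on the target AIX release.
        fmt.Println("Termio:  ", unsafe.Sizeof(unix.Termio{}))
        fmt.Println("Winsize: ", unsafe.Sizeof(unix.Winsize{}))
        fmt.Println("Statfs_t:", unsafe.Sizeof(unix.Statfs_t{}))
    }
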
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index 1944dfb1a..793b3fdd2 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -278,6 +278,14 @@ type RawSockaddrVM struct {
Zero [4]uint8
}
+type RawSockaddrXDP struct {
+ Family uint16
+ Flags uint16
+ Ifindex uint32
+ Queue_id uint32
+ Shared_umem_fd uint32
+}
+
type RawSockaddr struct {
Family uint16
Data [14]int8
@@ -412,6 +420,7 @@ const (
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
+ SizeofSockaddrXDP = 0x10
SizeofLinger = 0x8
SizeofIovec = 0x8
SizeofIPMreq = 0x8
@@ -517,6 +526,13 @@ const (
RTA_PREF = 0x14
RTA_ENCAP_TYPE = 0x15
RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
RTN_UNSPEC = 0x0
RTN_UNICAST = 0x1
RTN_LOCAL = 0x2
@@ -1889,3 +1905,35 @@ const (
NETNSA_PID = 0x2
NETNSA_FD = 0x3
)
+
+type XDPRingOffset struct {
+ Producer uint64
+ Consumer uint64
+ Desc uint64
+}
+
+type XDPMmapOffsets struct {
+ Rx XDPRingOffset
+ Tx XDPRingOffset
+ Fr XDPRingOffset
+ Cr XDPRingOffset
+}
+
+type XDPUmemReg struct {
+ Addr uint64
+ Len uint64
+ Size uint32
+ Headroom uint32
+}
+
+type XDPStatistics struct {
+ Rx_dropped uint64
+ Rx_invalid_descs uint64
+ Tx_invalid_descs uint64
+}
+
+type XDPDesc struct {
+ Addr uint64
+ Len uint32
+ Options uint32
+}
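
This hunk (mirrored below for every other Linux GOARCH) adds the AF_XDP address-family types: the raw sockaddr_xdp layout, the UMEM registration and ring-offset structs, and a batch of new RTA_* routing attributes. Purely as a sketch, a RawSockaddrXDP might be filled in like this before being handed to bind(2) on an AF_XDP socket; the interface name is a placeholder, and the numeric address family is written out because it is not certain that this revision exports an AF_XDP constant:

    package main

    import (
        "fmt"
        "net"

        "golang.org/x/sys/unix"
    )

    // Assumption: AF_XDP is 0x2c (44) on Linux.
    const afXDP = 0x2c

    func main() {
        ifi, err := net.InterfaceByName("eth0") // placeholder interface name
        if err != nil {
            panic(err)
        }
        sa := unix.RawSockaddrXDP{
            Family:   afXDP,
            Ifindex:  uint32(ifi.Index),
            Queue_id: 0,
        }
        fmt.Printf("sockaddr_xdp: %+v\n", sa)
    }
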
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index dd09289d1..c3548848a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -280,6 +280,14 @@ type RawSockaddrVM struct {
Zero [4]uint8
}
+type RawSockaddrXDP struct {
+ Family uint16
+ Flags uint16
+ Ifindex uint32
+ Queue_id uint32
+ Shared_umem_fd uint32
+}
+
type RawSockaddr struct {
Family uint16
Data [14]int8
@@ -416,6 +424,7 @@ const (
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
+ SizeofSockaddrXDP = 0x10
SizeofLinger = 0x8
SizeofIovec = 0x10
SizeofIPMreq = 0x8
@@ -521,6 +530,13 @@ const (
RTA_PREF = 0x14
RTA_ENCAP_TYPE = 0x15
RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
RTN_UNSPEC = 0x0
RTN_UNICAST = 0x1
RTN_LOCAL = 0x2
@@ -1911,3 +1927,35 @@ const (
NETNSA_PID = 0x2
NETNSA_FD = 0x3
)
+
+type XDPRingOffset struct {
+ Producer uint64
+ Consumer uint64
+ Desc uint64
+}
+
+type XDPMmapOffsets struct {
+ Rx XDPRingOffset
+ Tx XDPRingOffset
+ Fr XDPRingOffset
+ Cr XDPRingOffset
+}
+
+type XDPUmemReg struct {
+ Addr uint64
+ Len uint64
+ Size uint32
+ Headroom uint32
+}
+
+type XDPStatistics struct {
+ Rx_dropped uint64
+ Rx_invalid_descs uint64
+ Tx_invalid_descs uint64
+}
+
+type XDPDesc struct {
+ Addr uint64
+ Len uint32
+ Options uint32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index d9e844d8c..c7b699432 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -281,6 +281,14 @@ type RawSockaddrVM struct {
Zero [4]uint8
}
+type RawSockaddrXDP struct {
+ Family uint16
+ Flags uint16
+ Ifindex uint32
+ Queue_id uint32
+ Shared_umem_fd uint32
+}
+
type RawSockaddr struct {
Family uint16
Data [14]uint8
@@ -415,6 +423,7 @@ const (
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
+ SizeofSockaddrXDP = 0x10
SizeofLinger = 0x8
SizeofIovec = 0x8
SizeofIPMreq = 0x8
@@ -520,6 +529,13 @@ const (
RTA_PREF = 0x14
RTA_ENCAP_TYPE = 0x15
RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
RTN_UNSPEC = 0x0
RTN_UNICAST = 0x1
RTN_LOCAL = 0x2
@@ -1879,3 +1895,35 @@ const (
NETNSA_PID = 0x2
NETNSA_FD = 0x3
)
+
+type XDPRingOffset struct {
+ Producer uint64
+ Consumer uint64
+ Desc uint64
+}
+
+type XDPMmapOffsets struct {
+ Rx XDPRingOffset
+ Tx XDPRingOffset
+ Fr XDPRingOffset
+ Cr XDPRingOffset
+}
+
+type XDPUmemReg struct {
+ Addr uint64
+ Len uint64
+ Size uint32
+ Headroom uint32
+}
+
+type XDPStatistics struct {
+ Rx_dropped uint64
+ Rx_invalid_descs uint64
+ Tx_invalid_descs uint64
+}
+
+type XDPDesc struct {
+ Addr uint64
+ Len uint32
+ Options uint32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index fd2471a3c..2339b2129 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -281,6 +281,14 @@ type RawSockaddrVM struct {
Zero [4]uint8
}
+type RawSockaddrXDP struct {
+ Family uint16
+ Flags uint16
+ Ifindex uint32
+ Queue_id uint32
+ Shared_umem_fd uint32
+}
+
type RawSockaddr struct {
Family uint16
Data [14]int8
@@ -417,6 +425,7 @@ const (
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
+ SizeofSockaddrXDP = 0x10
SizeofLinger = 0x8
SizeofIovec = 0x10
SizeofIPMreq = 0x8
@@ -522,6 +531,13 @@ const (
RTA_PREF = 0x14
RTA_ENCAP_TYPE = 0x15
RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
RTN_UNSPEC = 0x0
RTN_UNICAST = 0x1
RTN_LOCAL = 0x2
@@ -1890,3 +1906,35 @@ const (
NETNSA_PID = 0x2
NETNSA_FD = 0x3
)
+
+type XDPRingOffset struct {
+ Producer uint64
+ Consumer uint64
+ Desc uint64
+}
+
+type XDPMmapOffsets struct {
+ Rx XDPRingOffset
+ Tx XDPRingOffset
+ Fr XDPRingOffset
+ Cr XDPRingOffset
+}
+
+type XDPUmemReg struct {
+ Addr uint64
+ Len uint64
+ Size uint32
+ Headroom uint32
+}
+
+type XDPStatistics struct {
+ Rx_dropped uint64
+ Rx_invalid_descs uint64
+ Tx_invalid_descs uint64
+}
+
+type XDPDesc struct {
+ Addr uint64
+ Len uint32
+ Options uint32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index 89218d9d1..013462ba5 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -279,6 +279,14 @@ type RawSockaddrVM struct {
Zero [4]uint8
}
+type RawSockaddrXDP struct {
+ Family uint16
+ Flags uint16
+ Ifindex uint32
+ Queue_id uint32
+ Shared_umem_fd uint32
+}
+
type RawSockaddr struct {
Family uint16
Data [14]int8
@@ -413,6 +421,7 @@ const (
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
+ SizeofSockaddrXDP = 0x10
SizeofLinger = 0x8
SizeofIovec = 0x8
SizeofIPMreq = 0x8
@@ -518,6 +527,13 @@ const (
RTA_PREF = 0x14
RTA_ENCAP_TYPE = 0x15
RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
RTN_UNSPEC = 0x0
RTN_UNICAST = 0x1
RTN_LOCAL = 0x2
@@ -1884,3 +1900,35 @@ const (
NETNSA_PID = 0x2
NETNSA_FD = 0x3
)
+
+type XDPRingOffset struct {
+ Producer uint64
+ Consumer uint64
+ Desc uint64
+}
+
+type XDPMmapOffsets struct {
+ Rx XDPRingOffset
+ Tx XDPRingOffset
+ Fr XDPRingOffset
+ Cr XDPRingOffset
+}
+
+type XDPUmemReg struct {
+ Addr uint64
+ Len uint64
+ Size uint32
+ Headroom uint32
+}
+
+type XDPStatistics struct {
+ Rx_dropped uint64
+ Rx_invalid_descs uint64
+ Tx_invalid_descs uint64
+}
+
+type XDPDesc struct {
+ Addr uint64
+ Len uint32
+ Options uint32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index 059903972..86f0ab552 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -281,6 +281,14 @@ type RawSockaddrVM struct {
Zero [4]uint8
}
+type RawSockaddrXDP struct {
+ Family uint16
+ Flags uint16
+ Ifindex uint32
+ Queue_id uint32
+ Shared_umem_fd uint32
+}
+
type RawSockaddr struct {
Family uint16
Data [14]int8
@@ -417,6 +425,7 @@ const (
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
+ SizeofSockaddrXDP = 0x10
SizeofLinger = 0x8
SizeofIovec = 0x10
SizeofIPMreq = 0x8
@@ -522,6 +531,13 @@ const (
RTA_PREF = 0x14
RTA_ENCAP_TYPE = 0x15
RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
RTN_UNSPEC = 0x0
RTN_UNICAST = 0x1
RTN_LOCAL = 0x2
@@ -1892,3 +1908,35 @@ const (
NETNSA_PID = 0x2
NETNSA_FD = 0x3
)
+
+type XDPRingOffset struct {
+ Producer uint64
+ Consumer uint64
+ Desc uint64
+}
+
+type XDPMmapOffsets struct {
+ Rx XDPRingOffset
+ Tx XDPRingOffset
+ Fr XDPRingOffset
+ Cr XDPRingOffset
+}
+
+type XDPUmemReg struct {
+ Addr uint64
+ Len uint64
+ Size uint32
+ Headroom uint32
+}
+
+type XDPStatistics struct {
+ Rx_dropped uint64
+ Rx_invalid_descs uint64
+ Tx_invalid_descs uint64
+}
+
+type XDPDesc struct {
+ Addr uint64
+ Len uint32
+ Options uint32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index c933cea67..007537b4d 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -281,6 +281,14 @@ type RawSockaddrVM struct {
Zero [4]uint8
}
+type RawSockaddrXDP struct {
+ Family uint16
+ Flags uint16
+ Ifindex uint32
+ Queue_id uint32
+ Shared_umem_fd uint32
+}
+
type RawSockaddr struct {
Family uint16
Data [14]int8
@@ -417,6 +425,7 @@ const (
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
+ SizeofSockaddrXDP = 0x10
SizeofLinger = 0x8
SizeofIovec = 0x10
SizeofIPMreq = 0x8
@@ -522,6 +531,13 @@ const (
RTA_PREF = 0x14
RTA_ENCAP_TYPE = 0x15
RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
RTN_UNSPEC = 0x0
RTN_UNICAST = 0x1
RTN_LOCAL = 0x2
@@ -1892,3 +1908,35 @@ const (
NETNSA_PID = 0x2
NETNSA_FD = 0x3
)
+
+type XDPRingOffset struct {
+ Producer uint64
+ Consumer uint64
+ Desc uint64
+}
+
+type XDPMmapOffsets struct {
+ Rx XDPRingOffset
+ Tx XDPRingOffset
+ Fr XDPRingOffset
+ Cr XDPRingOffset
+}
+
+type XDPUmemReg struct {
+ Addr uint64
+ Len uint64
+ Size uint32
+ Headroom uint32
+}
+
+type XDPStatistics struct {
+ Rx_dropped uint64
+ Rx_invalid_descs uint64
+ Tx_invalid_descs uint64
+}
+
+type XDPDesc struct {
+ Addr uint64
+ Len uint32
+ Options uint32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index 92d358ae1..fc4a15912 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -279,6 +279,14 @@ type RawSockaddrVM struct {
Zero [4]uint8
}
+type RawSockaddrXDP struct {
+ Family uint16
+ Flags uint16
+ Ifindex uint32
+ Queue_id uint32
+ Shared_umem_fd uint32
+}
+
type RawSockaddr struct {
Family uint16
Data [14]int8
@@ -413,6 +421,7 @@ const (
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
+ SizeofSockaddrXDP = 0x10
SizeofLinger = 0x8
SizeofIovec = 0x8
SizeofIPMreq = 0x8
@@ -518,6 +527,13 @@ const (
RTA_PREF = 0x14
RTA_ENCAP_TYPE = 0x15
RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
RTN_UNSPEC = 0x0
RTN_UNICAST = 0x1
RTN_LOCAL = 0x2
@@ -1884,3 +1900,35 @@ const (
NETNSA_PID = 0x2
NETNSA_FD = 0x3
)
+
+type XDPRingOffset struct {
+ Producer uint64
+ Consumer uint64
+ Desc uint64
+}
+
+type XDPMmapOffsets struct {
+ Rx XDPRingOffset
+ Tx XDPRingOffset
+ Fr XDPRingOffset
+ Cr XDPRingOffset
+}
+
+type XDPUmemReg struct {
+ Addr uint64
+ Len uint64
+ Size uint32
+ Headroom uint32
+}
+
+type XDPStatistics struct {
+ Rx_dropped uint64
+ Rx_invalid_descs uint64
+ Tx_invalid_descs uint64
+}
+
+type XDPDesc struct {
+ Addr uint64
+ Len uint32
+ Options uint32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index 2dbda8439..377e9efdf 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -282,6 +282,14 @@ type RawSockaddrVM struct {
Zero [4]uint8
}
+type RawSockaddrXDP struct {
+ Family uint16
+ Flags uint16
+ Ifindex uint32
+ Queue_id uint32
+ Shared_umem_fd uint32
+}
+
type RawSockaddr struct {
Family uint16
Data [14]uint8
@@ -418,6 +426,7 @@ const (
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
+ SizeofSockaddrXDP = 0x10
SizeofLinger = 0x8
SizeofIovec = 0x10
SizeofIPMreq = 0x8
@@ -523,6 +532,13 @@ const (
RTA_PREF = 0x14
RTA_ENCAP_TYPE = 0x15
RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
RTN_UNSPEC = 0x0
RTN_UNICAST = 0x1
RTN_LOCAL = 0x2
@@ -1900,3 +1916,35 @@ const (
NETNSA_PID = 0x2
NETNSA_FD = 0x3
)
+
+type XDPRingOffset struct {
+ Producer uint64
+ Consumer uint64
+ Desc uint64
+}
+
+type XDPMmapOffsets struct {
+ Rx XDPRingOffset
+ Tx XDPRingOffset
+ Fr XDPRingOffset
+ Cr XDPRingOffset
+}
+
+type XDPUmemReg struct {
+ Addr uint64
+ Len uint64
+ Size uint32
+ Headroom uint32
+}
+
+type XDPStatistics struct {
+ Rx_dropped uint64
+ Rx_invalid_descs uint64
+ Tx_invalid_descs uint64
+}
+
+type XDPDesc struct {
+ Addr uint64
+ Len uint32
+ Options uint32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index b1d235703..595ba6303 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -282,6 +282,14 @@ type RawSockaddrVM struct {
Zero [4]uint8
}
+type RawSockaddrXDP struct {
+ Family uint16
+ Flags uint16
+ Ifindex uint32
+ Queue_id uint32
+ Shared_umem_fd uint32
+}
+
type RawSockaddr struct {
Family uint16
Data [14]uint8
@@ -418,6 +426,7 @@ const (
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
+ SizeofSockaddrXDP = 0x10
SizeofLinger = 0x8
SizeofIovec = 0x10
SizeofIPMreq = 0x8
@@ -523,6 +532,13 @@ const (
RTA_PREF = 0x14
RTA_ENCAP_TYPE = 0x15
RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
RTN_UNSPEC = 0x0
RTN_UNICAST = 0x1
RTN_LOCAL = 0x2
@@ -1900,3 +1916,35 @@ const (
NETNSA_PID = 0x2
NETNSA_FD = 0x3
)
+
+type XDPRingOffset struct {
+ Producer uint64
+ Consumer uint64
+ Desc uint64
+}
+
+type XDPMmapOffsets struct {
+ Rx XDPRingOffset
+ Tx XDPRingOffset
+ Fr XDPRingOffset
+ Cr XDPRingOffset
+}
+
+type XDPUmemReg struct {
+ Addr uint64
+ Len uint64
+ Size uint32
+ Headroom uint32
+}
+
+type XDPStatistics struct {
+ Rx_dropped uint64
+ Rx_invalid_descs uint64
+ Tx_invalid_descs uint64
+}
+
+type XDPDesc struct {
+ Addr uint64
+ Len uint32
+ Options uint32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
index 0a34ccdd7..0ccf5bc3e 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
@@ -281,6 +281,14 @@ type RawSockaddrVM struct {
Zero [4]uint8
}
+type RawSockaddrXDP struct {
+ Family uint16
+ Flags uint16
+ Ifindex uint32
+ Queue_id uint32
+ Shared_umem_fd uint32
+}
+
type RawSockaddr struct {
Family uint16
Data [14]uint8
@@ -417,6 +425,7 @@ const (
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
+ SizeofSockaddrXDP = 0x10
SizeofLinger = 0x8
SizeofIovec = 0x10
SizeofIPMreq = 0x8
@@ -522,6 +531,13 @@ const (
RTA_PREF = 0x14
RTA_ENCAP_TYPE = 0x15
RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
RTN_UNSPEC = 0x0
RTN_UNICAST = 0x1
RTN_LOCAL = 0x2
@@ -1917,3 +1933,35 @@ const (
NETNSA_PID = 0x2
NETNSA_FD = 0x3
)
+
+type XDPRingOffset struct {
+ Producer uint64
+ Consumer uint64
+ Desc uint64
+}
+
+type XDPMmapOffsets struct {
+ Rx XDPRingOffset
+ Tx XDPRingOffset
+ Fr XDPRingOffset
+ Cr XDPRingOffset
+}
+
+type XDPUmemReg struct {
+ Addr uint64
+ Len uint64
+ Size uint32
+ Headroom uint32
+}
+
+type XDPStatistics struct {
+ Rx_dropped uint64
+ Rx_invalid_descs uint64
+ Tx_invalid_descs uint64
+}
+
+type XDPDesc struct {
+ Addr uint64
+ Len uint32
+ Options uint32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index 0c790546b..06b07852d 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -280,6 +280,14 @@ type RawSockaddrVM struct {
Zero [4]uint8
}
+type RawSockaddrXDP struct {
+ Family uint16
+ Flags uint16
+ Ifindex uint32
+ Queue_id uint32
+ Shared_umem_fd uint32
+}
+
type RawSockaddr struct {
Family uint16
Data [14]int8
@@ -416,6 +424,7 @@ const (
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
+ SizeofSockaddrXDP = 0x10
SizeofLinger = 0x8
SizeofIovec = 0x10
SizeofIPMreq = 0x8
@@ -521,6 +530,13 @@ const (
RTA_PREF = 0x14
RTA_ENCAP_TYPE = 0x15
RTA_ENCAP = 0x16
+ RTA_EXPIRES = 0x17
+ RTA_PAD = 0x18
+ RTA_UID = 0x19
+ RTA_TTL_PROPAGATE = 0x1a
+ RTA_IP_PROTO = 0x1b
+ RTA_SPORT = 0x1c
+ RTA_DPORT = 0x1d
RTN_UNSPEC = 0x0
RTN_UNICAST = 0x1
RTN_LOCAL = 0x2
@@ -1917,3 +1933,35 @@ const (
NETNSA_PID = 0x2
NETNSA_FD = 0x3
)
+
+type XDPRingOffset struct {
+ Producer uint64
+ Consumer uint64
+ Desc uint64
+}
+
+type XDPMmapOffsets struct {
+ Rx XDPRingOffset
+ Tx XDPRingOffset
+ Fr XDPRingOffset
+ Cr XDPRingOffset
+}
+
+type XDPUmemReg struct {
+ Addr uint64
+ Len uint64
+ Size uint32
+ Headroom uint32
+}
+
+type XDPStatistics struct {
+ Rx_dropped uint64
+ Rx_invalid_descs uint64
+ Tx_invalid_descs uint64
+}
+
+type XDPDesc struct {
+ Addr uint64
+ Len uint32
+ Options uint32
+}
diff --git a/vendor/google.golang.org/api/CONTRIBUTORS b/vendor/google.golang.org/api/CONTRIBUTORS
index fe55ebff0..bf7d94f71 100644
--- a/vendor/google.golang.org/api/CONTRIBUTORS
+++ b/vendor/google.golang.org/api/CONTRIBUTORS
@@ -44,11 +44,8 @@ Ivan Krasin
Jason Hall
Johan Euphrosine
Kostik Shtoyk
-Kunpei Sakai
-Matthew Whisenhunt
Michael McGreevy
Nick Craig-Wood
-Robbie Trencheny
Ross Light
Sarah Adams
Scott Van Woudenberg
diff --git a/vendor/google.golang.org/api/gensupport/go18.go b/vendor/google.golang.org/api/gensupport/go18.go
deleted file mode 100644
index c76cb8f20..000000000
--- a/vendor/google.golang.org/api/gensupport/go18.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.8
-
-package gensupport
-
-import (
- "io"
- "net/http"
-)
-
-// SetGetBody sets the GetBody field of req to f.
-func SetGetBody(req *http.Request, f func() (io.ReadCloser, error)) {
- req.GetBody = f
-}
diff --git a/vendor/google.golang.org/api/gensupport/jsonfloat.go b/vendor/google.golang.org/api/gensupport/jsonfloat.go
index 837785081..cb02335d2 100644
--- a/vendor/google.golang.org/api/gensupport/jsonfloat.go
+++ b/vendor/google.golang.org/api/gensupport/jsonfloat.go
@@ -1,4 +1,4 @@
-// Copyright 2016 Google LLC
+// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/vendor/google.golang.org/api/gensupport/media.go b/vendor/google.golang.org/api/gensupport/media.go
index 5895fef88..f3e77fc52 100644
--- a/vendor/google.golang.org/api/gensupport/media.go
+++ b/vendor/google.golang.org/api/gensupport/media.go
@@ -5,15 +5,12 @@
package gensupport
import (
- "bytes"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/textproto"
- "strings"
- "sync"
"google.golang.org/api/googleapi"
)
@@ -106,13 +103,12 @@ type typeReader struct {
typ string
}
-// multipartReader combines the contents of multiple readers to create a multipart/related HTTP body.
+// multipartReader combines the contents of multiple readers to creat a multipart/related HTTP body.
// Close must be called if reads from the multipartReader are abandoned before reaching EOF.
type multipartReader struct {
pr *io.PipeReader
- ctype string
- mu sync.Mutex
pipeOpen bool
+ ctype string
}
func newMultipartReader(parts []typeReader) *multipartReader {
@@ -148,13 +144,10 @@ func (mp *multipartReader) Read(data []byte) (n int, err error) {
}
func (mp *multipartReader) Close() error {
- mp.mu.Lock()
if !mp.pipeOpen {
- mp.mu.Unlock()
return nil
}
mp.pipeOpen = false
- mp.mu.Unlock()
return mp.pr.Close()
}
@@ -258,11 +251,11 @@ func (mi *MediaInfo) UploadType() string {
}
// UploadRequest sets up an HTTP request for media upload. It adds headers
-// as necessary, and returns a replacement for the body and a function for http.Request.GetBody.
-func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, getBody func() (io.ReadCloser, error), cleanup func()) {
+// as necessary, and returns a replacement for the body.
+func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, cleanup func()) {
cleanup = func() {}
if mi == nil {
- return body, nil, cleanup
+ return body, cleanup
}
var media io.Reader
if mi.media != nil {
@@ -276,17 +269,7 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB
media, _, _, _ = mi.buffer.Chunk()
}
if media != nil {
- fb := readerFunc(body)
- fm := readerFunc(media)
combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType)
- if fb != nil && fm != nil {
- getBody = func() (io.ReadCloser, error) {
- rb := ioutil.NopCloser(fb())
- rm := ioutil.NopCloser(fm())
- r, _ := CombineBodyMedia(rb, "application/json", rm, mi.mType)
- return r, nil
- }
- }
cleanup = func() { combined.Close() }
reqHeaders.Set("Content-Type", ctype)
body = combined
@@ -294,27 +277,7 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB
if mi.buffer != nil && mi.mType != "" && !mi.singleChunk {
reqHeaders.Set("X-Upload-Content-Type", mi.mType)
}
- return body, getBody, cleanup
-}
-
-// readerFunc returns a function that always returns an io.Reader that has the same
-// contents as r, provided that can be done without consuming r. Otherwise, it
-// returns nil.
-// See http.NewRequest (in net/http/request.go).
-func readerFunc(r io.Reader) func() io.Reader {
- switch r := r.(type) {
- case *bytes.Buffer:
- buf := r.Bytes()
- return func() io.Reader { return bytes.NewReader(buf) }
- case *bytes.Reader:
- snapshot := *r
- return func() io.Reader { r := snapshot; return &r }
- case *strings.Reader:
- snapshot := *r
- return func() io.Reader { r := snapshot; return &r }
- default:
- return nil
- }
+ return body, cleanup
}
// ResumableUpload returns an appropriately configured ResumableUpload value if the
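
This hunk rolls gensupport/media.go back to the revision before GetBody support: the readerFunc snapshot helper, the getBody return value of UploadRequest, and the mutex guarding multipartReader.Close all disappear. For background only, the sketch below shows the standard-library mechanism that code fed into: net/http replays a request body through Request.GetBody, which http.NewRequest pre-populates for buffer-backed readers, much as readerFunc did for combined multipart bodies. The URL and payload are placeholders:

    package main

    import (
        "bytes"
        "fmt"
        "io/ioutil"
        "net/http"
    )

    func main() {
        req, err := http.NewRequest("POST", "https://example.com/upload", bytes.NewReader([]byte("payload")))
        if err != nil {
            panic(err)
        }
        // Replay the body twice via GetBody, as the transport would on a
        // redirect or connection retry.
        for i := 0; i < 2; i++ {
            rc, err := req.GetBody()
            if err != nil {
                panic(err)
            }
            b, _ := ioutil.ReadAll(rc)
            rc.Close()
            fmt.Printf("replay %d: %q\n", i+1, b)
        }
    }
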
diff --git a/vendor/google.golang.org/api/gensupport/not_go18.go b/vendor/google.golang.org/api/gensupport/not_go18.go
deleted file mode 100644
index 2536501ce..000000000
--- a/vendor/google.golang.org/api/gensupport/not_go18.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.8
-
-package gensupport
-
-import (
- "io"
- "net/http"
-)
-
-func SetGetBody(req *http.Request, f func() (io.ReadCloser, error)) {}
diff --git a/vendor/google.golang.org/api/gensupport/retry.go b/vendor/google.golang.org/api/gensupport/retry.go
index e58c75e41..c60b3c394 100644
--- a/vendor/google.golang.org/api/gensupport/retry.go
+++ b/vendor/google.golang.org/api/gensupport/retry.go
@@ -1,4 +1,4 @@
-// Copyright 2017 Google LLC
+// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/vendor/google.golang.org/api/gensupport/send.go b/vendor/google.golang.org/api/gensupport/send.go
index 0f75aa867..092044f44 100644
--- a/vendor/google.golang.org/api/gensupport/send.go
+++ b/vendor/google.golang.org/api/gensupport/send.go
@@ -5,7 +5,6 @@
package gensupport
import (
- "encoding/json"
"errors"
"net/http"
@@ -60,12 +59,3 @@ func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*
}
return resp, err
}
-
-// DecodeResponse decodes the body of res into target. If there is no body,
-// target is unchanged.
-func DecodeResponse(target interface{}, res *http.Response) error {
- if res.StatusCode == http.StatusNoContent {
- return nil
- }
- return json.NewDecoder(res.Body).Decode(target)
-}
diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go
index c9984458b..f6e15be35 100644
--- a/vendor/google.golang.org/api/googleapi/googleapi.go
+++ b/vendor/google.golang.org/api/googleapi/googleapi.go
@@ -270,20 +270,11 @@ func ProcessMediaOptions(opts []MediaOption) *MediaOptions {
func ResolveRelative(basestr, relstr string) string {
u, _ := url.Parse(basestr)
- afterColonPath := ""
- if i := strings.IndexRune(relstr, ':'); i > 0 {
- afterColonPath = relstr[i+1:]
- relstr = relstr[:i]
- }
rel, _ := url.Parse(relstr)
u = u.ResolveReference(rel)
us := u.String()
- if afterColonPath != "" {
- us = fmt.Sprintf("%s:%s", us, afterColonPath)
- }
us = strings.Replace(us, "%7B", "{", -1)
us = strings.Replace(us, "%7D", "}", -1)
- us = strings.Replace(us, "%2A", "*", -1)
return us
}
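
The googleapi.go hunk drops the special handling for paths containing a colon and the %2A unescaping, leaving only brace restoration after URL resolution. A small sketch of that remaining round-trip: net/url percent-encodes '{' and '}' when the resolved URL is rendered, and the string replacements put the literal braces back so later path-parameter expansion can find them. The bucket/object placeholders are just examples:

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    func main() {
        base, _ := url.Parse("https://www.googleapis.com/storage/v1/")
        rel, _ := url.Parse("b/{bucket}/o/{object}")

        us := base.ResolveReference(rel).String() // braces come back as %7B / %7D
        us = strings.Replace(us, "%7B", "{", -1)
        us = strings.Replace(us, "%7D", "}", -1)
        fmt.Println(us) // https://www.googleapis.com/storage/v1/b/{bucket}/o/{object}
    }
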
diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go
index ec40a076e..b546b63b4 100644
--- a/vendor/google.golang.org/api/internal/creds.go
+++ b/vendor/google.golang.org/api/internal/creds.go
@@ -1,4 +1,4 @@
-// Copyright 2017 Google LLC
+// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,28 +15,90 @@
package internal
import (
+ "encoding/json"
"fmt"
"io/ioutil"
+ "time"
"golang.org/x/net/context"
+ "golang.org/x/oauth2"
"golang.org/x/oauth2/google"
)
// Creds returns credential information obtained from DialSettings, or if none, then
// it returns default credential information.
func Creds(ctx context.Context, ds *DialSettings) (*google.DefaultCredentials, error) {
- if ds.Credentials != nil {
- return ds.Credentials, nil
- }
if ds.CredentialsFile != "" {
- data, err := ioutil.ReadFile(ds.CredentialsFile)
- if err != nil {
- return nil, fmt.Errorf("cannot read credentials file: %v", err)
- }
- return google.CredentialsFromJSON(ctx, data, ds.Scopes...)
+ return credFileTokenSource(ctx, ds.CredentialsFile, ds.Scopes...)
}
if ds.TokenSource != nil {
return &google.DefaultCredentials{TokenSource: ds.TokenSource}, nil
}
return google.FindDefaultCredentials(ctx, ds.Scopes...)
}
+
+// credFileTokenSource reads a refresh token file or a service account and returns
+// a TokenSource constructed from the config.
+func credFileTokenSource(ctx context.Context, filename string, scope ...string) (*google.DefaultCredentials, error) {
+ data, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, fmt.Errorf("cannot read credentials file: %v", err)
+ }
+ // See if it is a refresh token credentials file first.
+ ts, ok, err := refreshTokenTokenSource(ctx, data, scope...)
+ if err != nil {
+ return nil, err
+ }
+ if ok {
+ return &google.DefaultCredentials{
+ TokenSource: ts,
+ JSON: data,
+ }, nil
+ }
+
+ // If not, it should be a service account.
+ cfg, err := google.JWTConfigFromJSON(data, scope...)
+ if err != nil {
+ return nil, fmt.Errorf("google.JWTConfigFromJSON: %v", err)
+ }
+ // jwt.Config does not expose the project ID, so re-unmarshal to get it.
+ var pid struct {
+ ProjectID string `json:"project_id"`
+ }
+ if err := json.Unmarshal(data, &pid); err != nil {
+ return nil, err
+ }
+ return &google.DefaultCredentials{
+ ProjectID: pid.ProjectID,
+ TokenSource: cfg.TokenSource(ctx),
+ JSON: data,
+ }, nil
+}
+
+func refreshTokenTokenSource(ctx context.Context, data []byte, scope ...string) (oauth2.TokenSource, bool, error) {
+ var c cred
+ if err := json.Unmarshal(data, &c); err != nil {
+ return nil, false, fmt.Errorf("cannot unmarshal credentials file: %v", err)
+ }
+ if c.ClientID == "" || c.ClientSecret == "" || c.RefreshToken == "" || c.Type != "authorized_user" {
+ return nil, false, nil
+ }
+ cfg := &oauth2.Config{
+ ClientID: c.ClientID,
+ ClientSecret: c.ClientSecret,
+ Endpoint: google.Endpoint,
+ RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
+ Scopes: scope,
+ }
+ return cfg.TokenSource(ctx, &oauth2.Token{
+ RefreshToken: c.RefreshToken,
+ Expiry: time.Now(),
+ }), true, nil
+}
+
+type cred struct {
+ ClientID string `json:"client_id"`
+ ClientSecret string `json:"client_secret"`
+ RefreshToken string `json:"refresh_token"`
+ Type string `json:"type"`
+}
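
The creds.go hunk replaces the google.CredentialsFromJSON call with an inline loader that first tries to read the credentials file as an "authorized_user" refresh-token blob and only then falls back to a service-account JWT config. A minimal sketch of the refresh-token path using only the public oauth2 packages; the JSON blob is fabricated for illustration and the scope is an arbitrary example:

    package main

    import (
        "encoding/json"
        "fmt"
        "time"

        "golang.org/x/net/context"
        "golang.org/x/oauth2"
        "golang.org/x/oauth2/google"
    )

    const fakeCreds = `{
      "client_id": "example-client-id.apps.googleusercontent.com",
      "client_secret": "example-secret",
      "refresh_token": "example-refresh-token",
      "type": "authorized_user"
    }`

    func main() {
        var c struct {
            ClientID     string `json:"client_id"`
            ClientSecret string `json:"client_secret"`
            RefreshToken string `json:"refresh_token"`
            Type         string `json:"type"`
        }
        if err := json.Unmarshal([]byte(fakeCreds), &c); err != nil {
            panic(err)
        }
        cfg := &oauth2.Config{
            ClientID:     c.ClientID,
            ClientSecret: c.ClientSecret,
            Endpoint:     google.Endpoint,
            Scopes:       []string{"https://www.googleapis.com/auth/devstorage.read_only"},
        }
        // An already-expired token forces a refresh on first use, exactly as
        // refreshTokenTokenSource arranges above.
        ts := cfg.TokenSource(context.Background(), &oauth2.Token{
            RefreshToken: c.RefreshToken,
            Expiry:       time.Now(),
        })
        fmt.Printf("token source ready: %T\n", ts)
    }
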
diff --git a/vendor/google.golang.org/api/internal/pool.go b/vendor/google.golang.org/api/internal/pool.go
index 3bb2c1a2d..4150feb6b 100644
--- a/vendor/google.golang.org/api/internal/pool.go
+++ b/vendor/google.golang.org/api/internal/pool.go
@@ -1,4 +1,4 @@
-// Copyright 2016 Google LLC
+// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go
index d347e5f41..514719178 100644
--- a/vendor/google.golang.org/api/internal/settings.go
+++ b/vendor/google.golang.org/api/internal/settings.go
@@ -1,4 +1,4 @@
-// Copyright 2017 Google LLC
+// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -16,11 +16,9 @@
package internal
import (
- "errors"
"net/http"
"golang.org/x/oauth2"
- "golang.org/x/oauth2/google"
"google.golang.org/grpc"
)
@@ -30,33 +28,10 @@ type DialSettings struct {
Endpoint string
Scopes []string
TokenSource oauth2.TokenSource
- Credentials *google.DefaultCredentials
CredentialsFile string // if set, Token Source is ignored.
UserAgent string
APIKey string
HTTPClient *http.Client
GRPCDialOpts []grpc.DialOption
GRPCConn *grpc.ClientConn
- NoAuth bool
-}
-
-// Validate reports an error if ds is invalid.
-func (ds *DialSettings) Validate() error {
- hasCreds := ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "" || ds.Credentials != nil
- if ds.NoAuth && hasCreds {
- return errors.New("options.WithoutAuthentication is incompatible with any option that provides credentials")
- }
- // Credentials should not appear with other options.
- // We currently allow TokenSource and CredentialsFile to coexist.
- // TODO(jba): make TokenSource & CredentialsFile an error (breaking change).
- if ds.Credentials != nil && (ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "") {
- return errors.New("multiple credential options provided")
- }
- if ds.HTTPClient != nil && ds.GRPCConn != nil {
- return errors.New("WithHTTPClient is incompatible with WithGRPCConn")
- }
- if ds.HTTPClient != nil && ds.GRPCDialOpts != nil {
- return errors.New("WithHTTPClient is incompatible with gRPC dial options")
- }
- return nil
}
diff --git a/vendor/google.golang.org/api/iterator/iterator.go b/vendor/google.golang.org/api/iterator/iterator.go
index 3c8ea7732..0640c8231 100644
--- a/vendor/google.golang.org/api/iterator/iterator.go
+++ b/vendor/google.golang.org/api/iterator/iterator.go
@@ -1,4 +1,4 @@
-// Copyright 2016 Google LLC
+// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -67,7 +67,7 @@ type PageInfo struct {
// be silently truncated.
fetch func(pageSize int, pageToken string) (nextPageToken string, err error)
- // Function that returns the number of currently buffered items.
+ // Function that clears the iterator's buffer, returning any currently buffered items.
bufLen func() int
// Function that returns the buffer, after setting the buffer variable to nil.
diff --git a/vendor/google.golang.org/api/option/credentials_go19.go b/vendor/google.golang.org/api/option/credentials_go19.go
deleted file mode 100644
index 30c889a11..000000000
--- a/vendor/google.golang.org/api/option/credentials_go19.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build go1.9
-
-package option
-
-import (
- "golang.org/x/oauth2/google"
- "google.golang.org/api/internal"
-)
-
-type withCreds google.Credentials
-
-func (w *withCreds) Apply(o *internal.DialSettings) {
- o.Credentials = (*google.Credentials)(w)
-}
-
-func WithCredentials(creds *google.Credentials) ClientOption {
- return (*withCreds)(creds)
-}
diff --git a/vendor/google.golang.org/api/option/credentials_notgo19.go b/vendor/google.golang.org/api/option/credentials_notgo19.go
deleted file mode 100644
index 74d3a4b5b..000000000
--- a/vendor/google.golang.org/api/option/credentials_notgo19.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !go1.9
-
-package option
-
-import (
- "golang.org/x/oauth2/google"
- "google.golang.org/api/internal"
-)
-
-type withCreds google.DefaultCredentials
-
-func (w *withCreds) Apply(o *internal.DialSettings) {
- o.Credentials = (*google.DefaultCredentials)(w)
-}
-
-func WithCredentials(creds *google.DefaultCredentials) ClientOption {
- return (*withCreds)(creds)
-}
diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go
index 8757868f1..e3080e38d 100644
--- a/vendor/google.golang.org/api/option/option.go
+++ b/vendor/google.golang.org/api/option/option.go
@@ -1,4 +1,4 @@
-// Copyright 2017 Google LLC
+// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -153,9 +153,6 @@ func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) {
// WithAPIKey returns a ClientOption that specifies an API key to be used
// as the basis for authentication.
-//
-// API Keys can only be used for JSON-over-HTTP APIs, including those under
-// the import path google.golang.org/api/....
func WithAPIKey(apiKey string) ClientOption {
return withAPIKey(apiKey)
}
@@ -163,16 +160,3 @@ func WithAPIKey(apiKey string) ClientOption {
type withAPIKey string
func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) }
-
-// WithoutAuthentication returns a ClientOption that specifies that no
-// authentication should be used. It is suitable only for testing and for
-// accessing public resources, like public Google Cloud Storage buckets.
-// It is an error to provide both WithoutAuthentication and any of WithAPIKey,
-// WithTokenSource, WithCredentialsFile or WithServiceAccountFile.
-func WithoutAuthentication() ClientOption {
- return withoutAuthentication{}
-}
-
-type withoutAuthentication struct{}
-
-func (w withoutAuthentication) Apply(o *internal.DialSettings) { o.NoAuth = true }
diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json
index 1ea73eca1..d5eb39a44 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-api.json
+++ b/vendor/google.golang.org/api/storage/v1/storage-api.json
@@ -1,3798 +1,3711 @@
{
- "auth": {
- "oauth2": {
- "scopes": {
- "https://www.googleapis.com/auth/cloud-platform": {
- "description": "View and manage your data across Google Cloud Platform services"
- },
- "https://www.googleapis.com/auth/cloud-platform.read-only": {
- "description": "View your data across Google Cloud Platform services"
- },
- "https://www.googleapis.com/auth/devstorage.full_control": {
- "description": "Manage your data and permissions in Google Cloud Storage"
- },
- "https://www.googleapis.com/auth/devstorage.read_only": {
- "description": "View your data in Google Cloud Storage"
- },
- "https://www.googleapis.com/auth/devstorage.read_write": {
- "description": "Manage your data in Google Cloud Storage"
- }
- }
+ "kind": "discovery#restDescription",
+ "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/aAU6-GJtzQTwC546w_DsCPIRIUA\"",
+ "discoveryVersion": "v1",
+ "id": "storage:v1",
+ "name": "storage",
+ "version": "v1",
+ "revision": "20170915",
+ "title": "Cloud Storage JSON API",
+ "description": "Stores and retrieves potentially large, immutable data objects.",
+ "ownerDomain": "google.com",
+ "ownerName": "Google",
+ "icons": {
+ "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png",
+ "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png"
+ },
+ "documentationLink": "https://developers.google.com/storage/docs/json_api/",
+ "labels": [
+ "labs"
+ ],
+ "protocol": "rest",
+ "baseUrl": "https://www.googleapis.com/storage/v1/",
+ "basePath": "/storage/v1/",
+ "rootUrl": "https://www.googleapis.com/",
+ "servicePath": "storage/v1/",
+ "batchPath": "batch",
+ "parameters": {
+ "alt": {
+ "type": "string",
+ "description": "Data format for the response.",
+ "default": "json",
+ "enum": [
+ "json"
+ ],
+ "enumDescriptions": [
+ "Responses with Content-Type of application/json"
+ ],
+ "location": "query"
+ },
+ "fields": {
+ "type": "string",
+ "description": "Selector specifying which fields to include in a partial response.",
+ "location": "query"
+ },
+ "key": {
+ "type": "string",
+ "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
+ "location": "query"
+ },
+ "oauth_token": {
+ "type": "string",
+ "description": "OAuth 2.0 token for the current user.",
+ "location": "query"
+ },
+ "prettyPrint": {
+ "type": "boolean",
+ "description": "Returns response with indentations and line breaks.",
+ "default": "true",
+ "location": "query"
+ },
+ "quotaUser": {
+ "type": "string",
+ "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.",
+ "location": "query"
+ },
+ "userIp": {
+ "type": "string",
+ "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.",
+ "location": "query"
+ }
+ },
+ "auth": {
+ "oauth2": {
+ "scopes": {
+ "https://www.googleapis.com/auth/cloud-platform": {
+ "description": "View and manage your data across Google Cloud Platform services"
+ },
+ "https://www.googleapis.com/auth/cloud-platform.read-only": {
+ "description": "View your data across Google Cloud Platform services"
+ },
+ "https://www.googleapis.com/auth/devstorage.full_control": {
+ "description": "Manage your data and permissions in Google Cloud Storage"
+ },
+ "https://www.googleapis.com/auth/devstorage.read_only": {
+ "description": "View your data in Google Cloud Storage"
+ },
+ "https://www.googleapis.com/auth/devstorage.read_write": {
+ "description": "Manage your data in Google Cloud Storage"
}
- },
- "basePath": "/storage/v1/",
- "baseUrl": "https://www.googleapis.com/storage/v1/",
- "batchPath": "batch/storage/v1",
- "description": "Stores and retrieves potentially large, immutable data objects.",
- "discoveryVersion": "v1",
- "documentationLink": "https://developers.google.com/storage/docs/json_api/",
- "etag": "\"J3WqvAcMk4eQjJXvfSI4Yr8VouA/akd8i6K-8A6ohXFVzxQZomL5PpA\"",
- "icons": {
- "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png",
- "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png"
- },
- "id": "storage:v1",
- "kind": "discovery#restDescription",
- "labels": [
- "labs"
- ],
- "name": "storage",
- "ownerDomain": "google.com",
- "ownerName": "Google",
- "parameters": {
- "alt": {
- "default": "json",
- "description": "Data format for the response.",
- "enum": [
- "json"
- ],
- "enumDescriptions": [
- "Responses with Content-Type of application/json"
- ],
- "location": "query",
- "type": "string"
+ }
+ }
+ },
+ "schemas": {
+ "Bucket": {
+ "id": "Bucket",
+ "type": "object",
+ "description": "A bucket.",
+ "properties": {
+ "acl": {
+ "type": "array",
+ "description": "Access controls on the bucket.",
+ "items": {
+ "$ref": "BucketAccessControl"
+ },
+ "annotations": {
+ "required": [
+ "storage.buckets.update"
+ ]
+ }
},
- "fields": {
- "description": "Selector specifying which fields to include in a partial response.",
- "location": "query",
- "type": "string"
- },
- "key": {
- "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
- "location": "query",
- "type": "string"
- },
- "oauth_token": {
- "description": "OAuth 2.0 token for the current user.",
- "location": "query",
- "type": "string"
- },
- "prettyPrint": {
- "default": "true",
- "description": "Returns response with indentations and line breaks.",
- "location": "query",
- "type": "boolean"
- },
- "quotaUser": {
- "description": "An opaque string that represents a user for quota purposes. Must not exceed 40 characters.",
- "location": "query",
- "type": "string"
- },
- "userIp": {
- "description": "Deprecated. Please use quotaUser instead.",
- "location": "query",
- "type": "string"
- }
- },
- "protocol": "rest",
- "resources": {
- "bucketAccessControls": {
- "methods": {
- "delete": {
- "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.",
- "httpMethod": "DELETE",
- "id": "storage.bucketAccessControls.delete",
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/acl/{entity}",
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "get": {
- "description": "Returns the ACL entry for the specified entity on the specified bucket.",
- "httpMethod": "GET",
- "id": "storage.bucketAccessControls.get",
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/acl/{entity}",
- "response": {
- "$ref": "BucketAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "insert": {
- "description": "Creates a new ACL entry on the specified bucket.",
- "httpMethod": "POST",
- "id": "storage.bucketAccessControls.insert",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/acl",
- "request": {
- "$ref": "BucketAccessControl"
- },
- "response": {
- "$ref": "BucketAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "list": {
- "description": "Retrieves ACL entries on the specified bucket.",
- "httpMethod": "GET",
- "id": "storage.bucketAccessControls.list",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/acl",
- "response": {
- "$ref": "BucketAccessControls"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "patch": {
- "description": "Patches an ACL entry on the specified bucket.",
- "httpMethod": "PATCH",
- "id": "storage.bucketAccessControls.patch",
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/acl/{entity}",
- "request": {
- "$ref": "BucketAccessControl"
- },
- "response": {
- "$ref": "BucketAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "update": {
- "description": "Updates an ACL entry on the specified bucket.",
- "httpMethod": "PUT",
- "id": "storage.bucketAccessControls.update",
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/acl/{entity}",
- "request": {
- "$ref": "BucketAccessControl"
- },
- "response": {
- "$ref": "BucketAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- }
+ "billing": {
+ "type": "object",
+ "description": "The bucket's billing configuration.",
+ "properties": {
+ "requesterPays": {
+ "type": "boolean",
+ "description": "When set to true, bucket is requester pays."
}
+ }
},
- "buckets": {
- "methods": {
- "delete": {
- "description": "Permanently deletes an empty bucket.",
- "httpMethod": "DELETE",
- "id": "storage.buckets.delete",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "If set, only deletes the bucket if its metageneration matches this value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "If set, only deletes the bucket if its metageneration does not match this value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}",
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "get": {
- "description": "Returns metadata for the specified bucket.",
- "httpMethod": "GET",
- "id": "storage.buckets.get",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to noAcl.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit owner, acl and defaultObjectAcl properties."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}",
- "response": {
- "$ref": "Bucket"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "getIamPolicy": {
- "description": "Returns an IAM policy for the specified bucket.",
- "httpMethod": "GET",
- "id": "storage.buckets.getIamPolicy",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/iam",
- "response": {
- "$ref": "Policy"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "insert": {
- "description": "Creates a new bucket.",
- "httpMethod": "POST",
- "id": "storage.buckets.insert",
- "parameterOrder": [
- "project"
- ],
- "parameters": {
- "predefinedAcl": {
- "description": "Apply a predefined set of access controls to this bucket.",
- "enum": [
- "authenticatedRead",
- "private",
- "projectPrivate",
- "publicRead",
- "publicReadWrite"
- ],
- "enumDescriptions": [
- "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
- "Project team owners get OWNER access.",
- "Project team members get access according to their roles.",
- "Project team owners get OWNER access, and allUsers get READER access.",
- "Project team owners get OWNER access, and allUsers get WRITER access."
- ],
- "location": "query",
- "type": "string"
- },
- "predefinedDefaultObjectAcl": {
- "description": "Apply a predefined set of default object access controls to this bucket.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query",
- "type": "string"
- },
- "project": {
- "description": "A valid API project identifier.",
- "location": "query",
- "required": true,
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit owner, acl and defaultObjectAcl properties."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b",
- "request": {
- "$ref": "Bucket"
- },
- "response": {
- "$ref": "Bucket"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "list": {
- "description": "Retrieves a list of buckets for a given project.",
- "httpMethod": "GET",
- "id": "storage.buckets.list",
- "parameterOrder": [
- "project"
- ],
- "parameters": {
- "maxResults": {
- "default": "1000",
- "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.",
- "format": "uint32",
- "location": "query",
- "minimum": "0",
- "type": "integer"
- },
- "pageToken": {
- "description": "A previously-returned page token representing part of the larger set of results to view.",
- "location": "query",
- "type": "string"
- },
- "prefix": {
- "description": "Filter results to buckets whose names begin with this prefix.",
- "location": "query",
- "type": "string"
- },
- "project": {
- "description": "A valid API project identifier.",
- "location": "query",
- "required": true,
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to noAcl.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit owner, acl and defaultObjectAcl properties."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b",
- "response": {
- "$ref": "Buckets"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "lockRetentionPolicy": {
- "description": "Locks retention policy on a bucket.",
- "httpMethod": "POST",
- "id": "storage.buckets.lockRetentionPolicy",
- "parameterOrder": [
- "bucket",
- "ifMetagenerationMatch"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the operation conditional on whether bucket's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/lockRetentionPolicy",
- "response": {
- "$ref": "Bucket"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "patch": {
- "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate. This method supports patch semantics.",
- "httpMethod": "PATCH",
- "id": "storage.buckets.patch",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "predefinedAcl": {
- "description": "Apply a predefined set of access controls to this bucket.",
- "enum": [
- "authenticatedRead",
- "private",
- "projectPrivate",
- "publicRead",
- "publicReadWrite"
- ],
- "enumDescriptions": [
- "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
- "Project team owners get OWNER access.",
- "Project team members get access according to their roles.",
- "Project team owners get OWNER access, and allUsers get READER access.",
- "Project team owners get OWNER access, and allUsers get WRITER access."
- ],
- "location": "query",
- "type": "string"
- },
- "predefinedDefaultObjectAcl": {
- "description": "Apply a predefined set of default object access controls to this bucket.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query",
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit owner, acl and defaultObjectAcl properties."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}",
- "request": {
- "$ref": "Bucket"
- },
- "response": {
- "$ref": "Bucket"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "setIamPolicy": {
- "description": "Updates an IAM policy for the specified bucket.",
- "httpMethod": "PUT",
- "id": "storage.buckets.setIamPolicy",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/iam",
- "request": {
- "$ref": "Policy"
- },
- "response": {
- "$ref": "Policy"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "testIamPermissions": {
- "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.",
- "httpMethod": "GET",
- "id": "storage.buckets.testIamPermissions",
- "parameterOrder": [
- "bucket",
- "permissions"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "permissions": {
- "description": "Permissions to test.",
- "location": "query",
- "repeated": true,
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/iam/testPermissions",
- "response": {
- "$ref": "TestIamPermissionsResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "update": {
- "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.",
- "httpMethod": "PUT",
- "id": "storage.buckets.update",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "predefinedAcl": {
- "description": "Apply a predefined set of access controls to this bucket.",
- "enum": [
- "authenticatedRead",
- "private",
- "projectPrivate",
- "publicRead",
- "publicReadWrite"
- ],
- "enumDescriptions": [
- "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
- "Project team owners get OWNER access.",
- "Project team members get access according to their roles.",
- "Project team owners get OWNER access, and allUsers get READER access.",
- "Project team owners get OWNER access, and allUsers get WRITER access."
- ],
- "location": "query",
- "type": "string"
- },
- "predefinedDefaultObjectAcl": {
- "description": "Apply a predefined set of default object access controls to this bucket.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query",
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit owner, acl and defaultObjectAcl properties."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}",
- "request": {
- "$ref": "Bucket"
- },
- "response": {
- "$ref": "Bucket"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
+ "cors": {
+ "type": "array",
+ "description": "The bucket's Cross-Origin Resource Sharing (CORS) configuration.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "maxAgeSeconds": {
+ "type": "integer",
+ "description": "The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.",
+ "format": "int32"
+ },
+ "method": {
+ "type": "array",
+ "description": "The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: \"*\" is permitted in the list of methods, and means \"any method\".",
+ "items": {
+ "type": "string"
}
- }
- },
- "channels": {
- "methods": {
- "stop": {
- "description": "Stop watching resources through this channel",
- "httpMethod": "POST",
- "id": "storage.channels.stop",
- "path": "channels/stop",
- "request": {
- "$ref": "Channel",
- "parameterName": "resource"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
+ },
+ "origin": {
+ "type": "array",
+ "description": "The list of Origins eligible to receive CORS response headers. Note: \"*\" is permitted in the list of origins, and means \"any Origin\".",
+ "items": {
+ "type": "string"
}
- }
- },
- "defaultObjectAccessControls": {
- "methods": {
- "delete": {
- "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.",
- "httpMethod": "DELETE",
- "id": "storage.defaultObjectAccessControls.delete",
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/defaultObjectAcl/{entity}",
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "get": {
- "description": "Returns the default object ACL entry for the specified entity on the specified bucket.",
- "httpMethod": "GET",
- "id": "storage.defaultObjectAccessControls.get",
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/defaultObjectAcl/{entity}",
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "insert": {
- "description": "Creates a new default object ACL entry on the specified bucket.",
- "httpMethod": "POST",
- "id": "storage.defaultObjectAccessControls.insert",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/defaultObjectAcl",
- "request": {
- "$ref": "ObjectAccessControl"
- },
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "list": {
- "description": "Retrieves default object ACL entries on the specified bucket.",
- "httpMethod": "GET",
- "id": "storage.defaultObjectAccessControls.list",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/defaultObjectAcl",
- "response": {
- "$ref": "ObjectAccessControls"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "patch": {
- "description": "Patches a default object ACL entry on the specified bucket.",
- "httpMethod": "PATCH",
- "id": "storage.defaultObjectAccessControls.patch",
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/defaultObjectAcl/{entity}",
- "request": {
- "$ref": "ObjectAccessControl"
- },
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "update": {
- "description": "Updates a default object ACL entry on the specified bucket.",
- "httpMethod": "PUT",
- "id": "storage.defaultObjectAccessControls.update",
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/defaultObjectAcl/{entity}",
- "request": {
- "$ref": "ObjectAccessControl"
- },
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
+ },
+ "responseHeader": {
+ "type": "array",
+ "description": "The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.",
+ "items": {
+ "type": "string"
}
+ }
}
+ }
},
- "notifications": {
- "methods": {
- "delete": {
- "description": "Permanently deletes a notification subscription.",
- "httpMethod": "DELETE",
- "id": "storage.notifications.delete",
- "parameterOrder": [
- "bucket",
- "notification"
- ],
- "parameters": {
- "bucket": {
- "description": "The parent bucket of the notification.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "notification": {
- "description": "ID of the notification to delete.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/notificationConfigs/{notification}",
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "get": {
- "description": "View a notification configuration.",
- "httpMethod": "GET",
- "id": "storage.notifications.get",
- "parameterOrder": [
- "bucket",
- "notification"
- ],
- "parameters": {
- "bucket": {
- "description": "The parent bucket of the notification.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "notification": {
- "description": "Notification ID",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/notificationConfigs/{notification}",
- "response": {
- "$ref": "Notification"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "insert": {
- "description": "Creates a notification subscription for a given bucket.",
- "httpMethod": "POST",
- "id": "storage.notifications.insert",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "The parent bucket of the notification.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/notificationConfigs",
- "request": {
- "$ref": "Notification"
- },
- "response": {
- "$ref": "Notification"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "list": {
- "description": "Retrieves a list of notification subscriptions for a given bucket.",
- "httpMethod": "GET",
- "id": "storage.notifications.list",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a Google Cloud Storage bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/notificationConfigs",
- "response": {
- "$ref": "Notifications"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- }
+ "defaultObjectAcl": {
+ "type": "array",
+ "description": "Default access controls to apply to new objects when no ACL is provided.",
+ "items": {
+ "$ref": "ObjectAccessControl"
+ }
+ },
+ "encryption": {
+ "type": "object",
+ "description": "Encryption configuration used by default for newly inserted objects, when no encryption config is specified.",
+ "properties": {
+ "defaultKmsKeyName": {
+ "type": "string"
}
+ }
},
- "objectAccessControls": {
- "methods": {
- "delete": {
- "description": "Permanently deletes the ACL entry for the specified entity on the specified object.",
- "httpMethod": "DELETE",
- "id": "storage.objectAccessControls.delete",
- "parameterOrder": [
- "bucket",
- "object",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}/acl/{entity}",
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "get": {
- "description": "Returns the ACL entry for the specified entity on the specified object.",
- "httpMethod": "GET",
- "id": "storage.objectAccessControls.get",
- "parameterOrder": [
- "bucket",
- "object",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}/acl/{entity}",
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "insert": {
- "description": "Creates a new ACL entry on the specified object.",
- "httpMethod": "POST",
- "id": "storage.objectAccessControls.insert",
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}/acl",
- "request": {
- "$ref": "ObjectAccessControl"
- },
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "list": {
- "description": "Retrieves ACL entries on the specified object.",
- "httpMethod": "GET",
- "id": "storage.objectAccessControls.list",
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}/acl",
- "response": {
- "$ref": "ObjectAccessControls"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "patch": {
- "description": "Patches an ACL entry on the specified object.",
- "httpMethod": "PATCH",
- "id": "storage.objectAccessControls.patch",
- "parameterOrder": [
- "bucket",
- "object",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}/acl/{entity}",
- "request": {
- "$ref": "ObjectAccessControl"
- },
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "update": {
- "description": "Updates an ACL entry on the specified object.",
- "httpMethod": "PUT",
- "id": "storage.objectAccessControls.update",
- "parameterOrder": [
- "bucket",
- "object",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}/acl/{entity}",
- "request": {
- "$ref": "ObjectAccessControl"
- },
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- }
- }
+ "etag": {
+ "type": "string",
+ "description": "HTTP 1.1 Entity tag for the bucket."
},
- "objects": {
- "methods": {
- "compose": {
- "description": "Concatenates a list of existing objects into a new object in the same bucket.",
- "httpMethod": "POST",
- "id": "storage.objects.compose",
- "parameterOrder": [
- "destinationBucket",
- "destinationObject"
- ],
- "parameters": {
- "destinationBucket": {
- "description": "Name of the bucket containing the source objects. The destination object is stored in this bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "destinationObject": {
- "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "destinationPredefinedAcl": {
- "description": "Apply a predefined set of access controls to the destination object.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query",
- "type": "string"
- },
- "ifGenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "kmsKeyName": {
- "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.",
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{destinationBucket}/o/{destinationObject}/compose",
- "request": {
- "$ref": "ComposeRequest"
- },
- "response": {
- "$ref": "Object"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "copy": {
- "description": "Copies a source object to a destination object. Optionally overrides metadata.",
- "httpMethod": "POST",
- "id": "storage.objects.copy",
- "parameterOrder": [
- "sourceBucket",
- "sourceObject",
- "destinationBucket",
- "destinationObject"
- ],
- "parameters": {
- "destinationBucket": {
- "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "destinationObject": {
- "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "destinationPredefinedAcl": {
- "description": "Apply a predefined set of access controls to the destination object.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query",
- "type": "string"
- },
- "ifGenerationMatch": {
- "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationNotMatch": {
- "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifSourceGenerationMatch": {
- "description": "Makes the operation conditional on whether the source object's current generation matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifSourceGenerationNotMatch": {
- "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifSourceMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifSourceMetagenerationNotMatch": {
- "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the owner, acl property."
- ],
- "location": "query",
- "type": "string"
- },
- "sourceBucket": {
- "description": "Name of the bucket in which to find the source object.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "sourceGeneration": {
- "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "sourceObject": {
- "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}",
- "request": {
- "$ref": "Object"
- },
- "response": {
- "$ref": "Object"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "delete": {
- "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.",
- "httpMethod": "DELETE",
- "id": "storage.objects.delete",
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which the object resides.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}",
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "get": {
- "description": "Retrieves an object or its metadata.",
- "httpMethod": "GET",
- "id": "storage.objects.get",
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which the object resides.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to noAcl.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the owner, acl property."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}",
- "response": {
- "$ref": "Object"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ],
- "supportsMediaDownload": true,
- "useMediaDownloadService": true
- },
- "getIamPolicy": {
- "description": "Returns an IAM policy for the specified object.",
- "httpMethod": "GET",
- "id": "storage.objects.getIamPolicy",
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which the object resides.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}/iam",
- "response": {
- "$ref": "Policy"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "insert": {
- "description": "Stores a new object and metadata.",
- "httpMethod": "POST",
- "id": "storage.objects.insert",
- "mediaUpload": {
- "accept": [
- "*/*"
- ],
- "protocols": {
- "resumable": {
- "multipart": true,
- "path": "/resumable/upload/storage/v1/b/{bucket}/o"
- },
- "simple": {
- "multipart": true,
- "path": "/upload/storage/v1/b/{bucket}/o"
- }
- }
- },
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "contentEncoding": {
- "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.",
- "location": "query",
- "type": "string"
- },
- "ifGenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "kmsKeyName": {
- "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.",
- "location": "query",
- "type": "string"
- },
- "name": {
- "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "query",
- "type": "string"
- },
- "predefinedAcl": {
- "description": "Apply a predefined set of access controls to this object.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query",
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the owner, acl property."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o",
- "request": {
- "$ref": "Object"
- },
- "response": {
- "$ref": "Object"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ],
- "supportsMediaUpload": true
- },
- "list": {
- "description": "Retrieves a list of objects matching the criteria.",
- "httpMethod": "GET",
- "id": "storage.objects.list",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which to look for objects.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "delimiter": {
- "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
- "location": "query",
- "type": "string"
- },
- "includeTrailingDelimiter": {
- "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.",
- "location": "query",
- "type": "boolean"
- },
- "maxResults": {
- "default": "1000",
- "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.",
- "format": "uint32",
- "location": "query",
- "minimum": "0",
- "type": "integer"
- },
- "pageToken": {
- "description": "A previously-returned page token representing part of the larger set of results to view.",
- "location": "query",
- "type": "string"
- },
- "prefix": {
- "description": "Filter results to objects whose names begin with this prefix.",
- "location": "query",
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to noAcl.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the owner, acl property."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- },
- "versions": {
- "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
- "location": "query",
- "type": "boolean"
- }
- },
- "path": "b/{bucket}/o",
- "response": {
- "$ref": "Objects"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ],
- "supportsSubscription": true
- },
- "patch": {
- "description": "Patches an object's metadata.",
- "httpMethod": "PATCH",
- "id": "storage.objects.patch",
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which the object resides.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "predefinedAcl": {
- "description": "Apply a predefined set of access controls to this object.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query",
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the owner, acl property."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request, for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}",
- "request": {
- "$ref": "Object"
- },
- "response": {
- "$ref": "Object"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "rewrite": {
- "description": "Rewrites a source object to a destination object. Optionally overrides metadata.",
- "httpMethod": "POST",
- "id": "storage.objects.rewrite",
- "parameterOrder": [
- "sourceBucket",
- "sourceObject",
- "destinationBucket",
- "destinationObject"
- ],
- "parameters": {
- "destinationBucket": {
- "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "destinationKmsKeyName": {
- "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.",
- "location": "query",
- "type": "string"
- },
- "destinationObject": {
- "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "destinationPredefinedAcl": {
- "description": "Apply a predefined set of access controls to the destination object.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query",
- "type": "string"
- },
- "ifGenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifSourceGenerationMatch": {
- "description": "Makes the operation conditional on whether the source object's current generation matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifSourceGenerationNotMatch": {
- "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifSourceMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifSourceMetagenerationNotMatch": {
- "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "maxBytesRewrittenPerCall": {
- "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the owner, acl property."
- ],
- "location": "query",
- "type": "string"
- },
- "rewriteToken": {
- "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.",
- "location": "query",
- "type": "string"
- },
- "sourceBucket": {
- "description": "Name of the bucket in which to find the source object.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "sourceGeneration": {
- "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "sourceObject": {
- "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}",
- "request": {
- "$ref": "Object"
- },
- "response": {
- "$ref": "RewriteResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "setIamPolicy": {
- "description": "Updates an IAM policy for the specified object.",
- "httpMethod": "PUT",
- "id": "storage.objects.setIamPolicy",
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which the object resides.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}/iam",
- "request": {
- "$ref": "Policy"
- },
- "response": {
- "$ref": "Policy"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "testIamPermissions": {
- "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.",
- "httpMethod": "GET",
- "id": "storage.objects.testIamPermissions",
- "parameterOrder": [
- "bucket",
- "object",
- "permissions"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which the object resides.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "permissions": {
- "description": "Permissions to test.",
- "location": "query",
- "repeated": true,
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}/iam/testPermissions",
- "response": {
- "$ref": "TestIamPermissionsResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "update": {
- "description": "Updates an object's metadata.",
- "httpMethod": "PUT",
- "id": "storage.objects.update",
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which the object resides.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "predefinedAcl": {
- "description": "Apply a predefined set of access controls to this object.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query",
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the owner, acl property."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}",
- "request": {
- "$ref": "Object"
- },
- "response": {
- "$ref": "Object"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "watchAll": {
- "description": "Watch for changes on all objects in a bucket.",
- "httpMethod": "POST",
- "id": "storage.objects.watchAll",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which to look for objects.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "delimiter": {
- "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
- "location": "query",
- "type": "string"
- },
- "includeTrailingDelimiter": {
- "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.",
- "location": "query",
- "type": "boolean"
- },
- "maxResults": {
- "default": "1000",
- "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.",
- "format": "uint32",
- "location": "query",
- "minimum": "0",
- "type": "integer"
- },
- "pageToken": {
- "description": "A previously-returned page token representing part of the larger set of results to view.",
- "location": "query",
- "type": "string"
- },
- "prefix": {
- "description": "Filter results to objects whose names begin with this prefix.",
- "location": "query",
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to noAcl.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the owner, acl property."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- },
- "versions": {
- "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
- "location": "query",
- "type": "boolean"
- }
- },
- "path": "b/{bucket}/o/watch",
- "request": {
- "$ref": "Channel",
- "parameterName": "resource"
- },
- "response": {
- "$ref": "Channel"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ],
- "supportsSubscription": true
- }
- }
+ "id": {
+ "type": "string",
+ "description": "The ID of the bucket. For buckets, the id and name properities are the same."
},
- "projects": {
- "resources": {
- "serviceAccount": {
- "methods": {
- "get": {
- "description": "Get the email address of this project's Google Cloud Storage service account.",
- "httpMethod": "GET",
- "id": "storage.projects.serviceAccount.get",
- "parameterOrder": [
- "projectId"
- ],
- "parameters": {
- "projectId": {
- "description": "Project ID",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "projects/{projectId}/serviceAccount",
- "response": {
- "$ref": "ServiceAccount"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- }
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is. For buckets, this is always storage#bucket.",
+ "default": "storage#bucket"
+ },
+ "labels": {
+ "type": "object",
+ "description": "User-provided labels, in key/value pairs.",
+ "additionalProperties": {
+ "type": "string",
+ "description": "An individual label entry."
+ }
+ },
+ "lifecycle": {
+ "type": "object",
+ "description": "The bucket's lifecycle configuration. See lifecycle management for more information.",
+ "properties": {
+ "rule": {
+ "type": "array",
+ "description": "A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "action": {
+ "type": "object",
+ "description": "The action to take.",
+ "properties": {
+ "storageClass": {
+ "type": "string",
+ "description": "Target storage class. Required iff the type of the action is SetStorageClass."
+ },
+ "type": {
+ "type": "string",
+ "description": "Type of the action. Currently, only Delete and SetStorageClass are supported."
+ }
}
+ },
+ "condition": {
+ "type": "object",
+ "description": "The condition(s) under which the action will be taken.",
+ "properties": {
+ "age": {
+ "type": "integer",
+ "description": "Age of an object (in days). This condition is satisfied when an object reaches the specified age.",
+ "format": "int32"
+ },
+ "createdBefore": {
+ "type": "string",
+ "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when an object is created before midnight of the specified date in UTC.",
+ "format": "date"
+ },
+ "isLive": {
+ "type": "boolean",
+ "description": "Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects."
+ },
+ "matchesStorageClass": {
+ "type": "array",
+ "description": "Objects having any of the storage classes specified by this condition will be matched. Values include MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, and DURABLE_REDUCED_AVAILABILITY.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "numNewerVersions": {
+ "type": "integer",
+ "description": "Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.",
+ "format": "int32"
+ }
+ }
+ }
}
+ }
}
+ }
+ },
+ "location": {
+ "type": "string",
+ "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list."
+ },
+ "logging": {
+ "type": "object",
+ "description": "The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.",
+ "properties": {
+ "logBucket": {
+ "type": "string",
+ "description": "The destination bucket where the current bucket's logs should be placed."
+ },
+ "logObjectPrefix": {
+ "type": "string",
+ "description": "A prefix for log object names."
+ }
+ }
+ },
+ "metageneration": {
+ "type": "string",
+ "description": "The metadata generation of this bucket.",
+ "format": "int64"
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the bucket.",
+ "annotations": {
+ "required": [
+ "storage.buckets.insert"
+ ]
+ }
+ },
+ "owner": {
+ "type": "object",
+ "description": "The owner of the bucket. This is always the project team's owner group.",
+ "properties": {
+ "entity": {
+ "type": "string",
+ "description": "The entity, in the form project-owner-projectId."
+ },
+ "entityId": {
+ "type": "string",
+ "description": "The ID for the entity."
+ }
+ }
+ },
+ "projectNumber": {
+ "type": "string",
+ "description": "The project number of the project the bucket belongs to.",
+ "format": "uint64"
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "The URI of this bucket."
+ },
+ "storageClass": {
+ "type": "string",
+ "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes."
+ },
+ "timeCreated": {
+ "type": "string",
+ "description": "The creation time of the bucket in RFC 3339 format.",
+ "format": "date-time"
+ },
+ "updated": {
+ "type": "string",
+ "description": "The modification time of the bucket in RFC 3339 format.",
+ "format": "date-time"
+ },
+ "versioning": {
+ "type": "object",
+ "description": "The bucket's versioning configuration.",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "While set to true, versioning is fully enabled for this bucket."
+ }
+ }
+ },
+ "website": {
+ "type": "object",
+ "description": "The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. See the Static Website Examples for more information.",
+ "properties": {
+ "mainPageSuffix": {
+ "type": "string",
+ "description": "If the requested object path is missing, the service will ensure the path has a trailing '/', append this suffix, and attempt to retrieve the resulting object. This allows the creation of index.html objects to represent directory pages."
+ },
+ "notFoundPage": {
+ "type": "string",
+ "description": "If the requested object path is missing, and any mainPageSuffix object is missing, if applicable, the service will return the named object from this bucket as the content for a 404 Not Found result."
+ }
+ }
}
+ }
},
- "revision": "20180905",
- "rootUrl": "https://www.googleapis.com/",
- "schemas": {
- "Bucket": {
- "description": "A bucket.",
- "id": "Bucket",
- "properties": {
- "acl": {
- "annotations": {
- "required": [
- "storage.buckets.update"
- ]
- },
- "description": "Access controls on the bucket.",
- "items": {
- "$ref": "BucketAccessControl"
- },
- "type": "array"
- },
- "billing": {
- "description": "The bucket's billing configuration.",
- "properties": {
- "requesterPays": {
- "description": "When set to true, Requester Pays is enabled for this bucket.",
- "type": "boolean"
- }
- },
- "type": "object"
- },
- "cors": {
- "description": "The bucket's Cross-Origin Resource Sharing (CORS) configuration.",
- "items": {
- "properties": {
- "maxAgeSeconds": {
- "description": "The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.",
- "format": "int32",
- "type": "integer"
- },
- "method": {
- "description": "The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: \"*\" is permitted in the list of methods, and means \"any method\".",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "origin": {
- "description": "The list of Origins eligible to receive CORS response headers. Note: \"*\" is permitted in the list of origins, and means \"any Origin\".",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "responseHeader": {
- "description": "The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.",
- "items": {
- "type": "string"
- },
- "type": "array"
- }
- },
- "type": "object"
- },
- "type": "array"
- },
- "defaultEventBasedHold": {
- "description": "The default value for event-based hold on newly created objects in this bucket. Event-based hold is a way to retain objects indefinitely until an event occurs, signified by the hold's release. After being released, such objects will be subject to bucket-level retention (if any). One sample use case of this flag is for banks to hold loan documents for at least 3 years after loan is paid in full. Here, bucket-level retention is 3 years and the event is loan being paid in full. In this example, these objects will be held intact for any number of years until the event has occurred (event-based hold on the object is released) and then 3 more years after that. That means retention duration of the objects begins from the moment event-based hold transitioned from true to false. Objects under event-based hold cannot be deleted, overwritten or archived until the hold is removed.",
- "type": "boolean"
- },
- "defaultObjectAcl": {
- "description": "Default access controls to apply to new objects when no ACL is provided.",
- "items": {
- "$ref": "ObjectAccessControl"
- },
- "type": "array"
- },
- "encryption": {
- "description": "Encryption configuration for a bucket.",
- "properties": {
- "defaultKmsKeyName": {
- "description": "A Cloud KMS key that will be used to encrypt objects inserted into this bucket, if no encryption method is specified.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "etag": {
- "description": "HTTP 1.1 Entity tag for the bucket.",
- "type": "string"
- },
- "id": {
- "description": "The ID of the bucket. For buckets, the id and name properties are the same.",
- "type": "string"
- },
- "kind": {
- "default": "storage#bucket",
- "description": "The kind of item this is. For buckets, this is always storage#bucket.",
- "type": "string"
- },
- "labels": {
- "additionalProperties": {
- "description": "An individual label entry.",
- "type": "string"
- },
- "description": "User-provided labels, in key/value pairs.",
- "type": "object"
- },
- "lifecycle": {
- "description": "The bucket's lifecycle configuration. See lifecycle management for more information.",
- "properties": {
- "rule": {
- "description": "A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.",
- "items": {
- "properties": {
- "action": {
- "description": "The action to take.",
- "properties": {
- "storageClass": {
- "description": "Target storage class. Required iff the type of the action is SetStorageClass.",
- "type": "string"
- },
- "type": {
- "description": "Type of the action. Currently, only Delete and SetStorageClass are supported.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "condition": {
- "description": "The condition(s) under which the action will be taken.",
- "properties": {
- "age": {
- "description": "Age of an object (in days). This condition is satisfied when an object reaches the specified age.",
- "format": "int32",
- "type": "integer"
- },
- "createdBefore": {
- "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when an object is created before midnight of the specified date in UTC.",
- "format": "date",
- "type": "string"
- },
- "isLive": {
- "description": "Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects.",
- "type": "boolean"
- },
- "matchesPattern": {
- "description": "A regular expression that satisfies the RE2 syntax. This condition is satisfied when the name of the object matches the RE2 pattern. Note: This feature is currently in the \"Early Access\" launch stage and is only available to a whitelisted set of users; that means that this feature may be changed in backward-incompatible ways and that it is not guaranteed to be released.",
- "type": "string"
- },
- "matchesStorageClass": {
- "description": "Objects having any of the storage classes specified by this condition will be matched. Values include MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, and DURABLE_REDUCED_AVAILABILITY.",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "numNewerVersions": {
- "description": "Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.",
- "format": "int32",
- "type": "integer"
- }
- },
- "type": "object"
- }
- },
- "type": "object"
- },
- "type": "array"
- }
- },
- "type": "object"
- },
- "location": {
- "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list.",
- "type": "string"
- },
- "logging": {
- "description": "The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.",
- "properties": {
- "logBucket": {
- "description": "The destination bucket where the current bucket's logs should be placed.",
- "type": "string"
- },
- "logObjectPrefix": {
- "description": "A prefix for log object names.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "metageneration": {
- "description": "The metadata generation of this bucket.",
- "format": "int64",
- "type": "string"
- },
- "name": {
- "annotations": {
- "required": [
- "storage.buckets.insert"
- ]
- },
- "description": "The name of the bucket.",
- "type": "string"
- },
- "owner": {
- "description": "The owner of the bucket. This is always the project team's owner group.",
- "properties": {
- "entity": {
- "description": "The entity, in the form project-owner-projectId.",
- "type": "string"
- },
- "entityId": {
- "description": "The ID for the entity.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "projectNumber": {
- "description": "The project number of the project the bucket belongs to.",
- "format": "uint64",
- "type": "string"
- },
- "retentionPolicy": {
- "description": "The bucket's retention policy. The retention policy enforces a minimum retention time for all objects contained in the bucket, based on their creation time. Any attempt to overwrite or delete objects younger than the retention period will result in a PERMISSION_DENIED error. An unlocked retention policy can be modified or removed from the bucket via a storage.buckets.update operation. A locked retention policy cannot be removed or shortened in duration for the lifetime of the bucket. Attempting to remove or decrease period of a locked retention policy will result in a PERMISSION_DENIED error.",
- "properties": {
- "effectiveTime": {
- "description": "Server-determined value that indicates the time from which policy was enforced and effective. This value is in RFC 3339 format.",
- "format": "date-time",
- "type": "string"
- },
- "isLocked": {
- "description": "Once locked, an object retention policy cannot be modified.",
- "type": "boolean"
- },
- "retentionPeriod": {
- "description": "The duration in seconds that objects need to be retained. Retention duration must be greater than zero and less than 100 years. Note that enforcement of retention periods less than a day is not guaranteed. Such periods should only be used for testing purposes.",
- "format": "int64",
- "type": "string"
- }
- },
- "type": "object"
- },
- "selfLink": {
- "description": "The URI of this bucket.",
- "type": "string"
- },
- "storageClass": {
- "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes.",
- "type": "string"
- },
- "timeCreated": {
- "description": "The creation time of the bucket in RFC 3339 format.",
- "format": "date-time",
- "type": "string"
- },
- "updated": {
- "description": "The modification time of the bucket in RFC 3339 format.",
- "format": "date-time",
- "type": "string"
- },
- "versioning": {
- "description": "The bucket's versioning configuration.",
- "properties": {
- "enabled": {
- "description": "While set to true, versioning is fully enabled for this bucket.",
- "type": "boolean"
- }
- },
- "type": "object"
- },
- "website": {
- "description": "The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. See the Static Website Examples for more information.",
- "properties": {
- "mainPageSuffix": {
- "description": "If the requested object path is missing, the service will ensure the path has a trailing '/', append this suffix, and attempt to retrieve the resulting object. This allows the creation of index.html objects to represent directory pages.",
- "type": "string"
- },
- "notFoundPage": {
- "description": "If the requested object path is missing, and any mainPageSuffix object is missing, if applicable, the service will return the named object from this bucket as the content for a 404 Not Found result.",
- "type": "string"
- }
- },
- "type": "object"
- }
- },
- "type": "object"
+ "BucketAccessControl": {
+ "id": "BucketAccessControl",
+ "type": "object",
+ "description": "An access-control entry.",
+ "properties": {
+ "bucket": {
+ "type": "string",
+ "description": "The name of the bucket."
},
- "BucketAccessControl": {
- "description": "An access-control entry.",
- "id": "BucketAccessControl",
- "properties": {
- "bucket": {
- "description": "The name of the bucket.",
- "type": "string"
- },
- "domain": {
- "description": "The domain associated with the entity, if any.",
- "type": "string"
- },
- "email": {
- "description": "The email address associated with the entity, if any.",
- "type": "string"
- },
- "entity": {
- "annotations": {
- "required": [
- "storage.bucketAccessControls.insert"
- ]
- },
- "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.",
- "type": "string"
- },
- "entityId": {
- "description": "The ID for the entity, if any.",
- "type": "string"
- },
- "etag": {
- "description": "HTTP 1.1 Entity tag for the access-control entry.",
- "type": "string"
- },
- "id": {
- "description": "The ID of the access-control entry.",
- "type": "string"
- },
- "kind": {
- "default": "storage#bucketAccessControl",
- "description": "The kind of item this is. For bucket access control entries, this is always storage#bucketAccessControl.",
- "type": "string"
- },
- "projectTeam": {
- "description": "The project team associated with the entity, if any.",
- "properties": {
- "projectNumber": {
- "description": "The project number.",
- "type": "string"
- },
- "team": {
- "description": "The team.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "role": {
- "annotations": {
- "required": [
- "storage.bucketAccessControls.insert"
- ]
- },
- "description": "The access permission for the entity.",
- "type": "string"
- },
- "selfLink": {
- "description": "The link to this access-control entry.",
- "type": "string"
- }
- },
- "type": "object"
+ "domain": {
+ "type": "string",
+ "description": "The domain associated with the entity, if any."
},
- "BucketAccessControls": {
- "description": "An access-control list.",
- "id": "BucketAccessControls",
- "properties": {
- "items": {
- "description": "The list of items.",
- "items": {
- "$ref": "BucketAccessControl"
- },
- "type": "array"
- },
- "kind": {
- "default": "storage#bucketAccessControls",
- "description": "The kind of item this is. For lists of bucket access control entries, this is always storage#bucketAccessControls.",
- "type": "string"
- }
- },
- "type": "object"
+ "email": {
+ "type": "string",
+ "description": "The email address associated with the entity, if any."
},
- "Buckets": {
- "description": "A list of buckets.",
- "id": "Buckets",
- "properties": {
- "items": {
- "description": "The list of items.",
- "items": {
- "$ref": "Bucket"
- },
- "type": "array"
- },
- "kind": {
- "default": "storage#buckets",
- "description": "The kind of item this is. For lists of buckets, this is always storage#buckets.",
- "type": "string"
- },
- "nextPageToken": {
- "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.",
- "type": "string"
- }
- },
- "type": "object"
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.",
+ "annotations": {
+ "required": [
+ "storage.bucketAccessControls.insert"
+ ]
+ }
},
- "Channel": {
- "description": "An notification channel used to watch for resource changes.",
- "id": "Channel",
- "properties": {
- "address": {
- "description": "The address where notifications are delivered for this channel.",
- "type": "string"
- },
- "expiration": {
- "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.",
- "format": "int64",
- "type": "string"
- },
- "id": {
- "description": "A UUID or similar unique string that identifies this channel.",
- "type": "string"
- },
- "kind": {
- "default": "api#channel",
- "description": "Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string \"api#channel\".",
- "type": "string"
- },
- "params": {
- "additionalProperties": {
- "description": "Declares a new parameter by name.",
- "type": "string"
- },
- "description": "Additional parameters controlling delivery channel behavior. Optional.",
- "type": "object"
- },
- "payload": {
- "description": "A Boolean value to indicate whether payload is wanted. Optional.",
- "type": "boolean"
- },
- "resourceId": {
- "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions.",
- "type": "string"
- },
- "resourceUri": {
- "description": "A version-specific identifier for the watched resource.",
- "type": "string"
- },
- "token": {
- "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional.",
- "type": "string"
- },
- "type": {
- "description": "The type of delivery mechanism used for this channel.",
- "type": "string"
- }
- },
- "type": "object"
+ "entityId": {
+ "type": "string",
+ "description": "The ID for the entity, if any."
},
- "ComposeRequest": {
- "description": "A Compose request.",
- "id": "ComposeRequest",
- "properties": {
- "destination": {
- "$ref": "Object",
- "description": "Properties of the resulting object."
- },
- "kind": {
- "default": "storage#composeRequest",
- "description": "The kind of item this is.",
- "type": "string"
- },
- "sourceObjects": {
- "annotations": {
- "required": [
- "storage.objects.compose"
- ]
- },
- "description": "The list of source objects that will be concatenated into a single object.",
- "items": {
- "properties": {
- "generation": {
- "description": "The generation of this object to use as the source.",
- "format": "int64",
- "type": "string"
- },
- "name": {
- "annotations": {
- "required": [
- "storage.objects.compose"
- ]
- },
- "description": "The source object's name. All source objects must reside in the same bucket.",
- "type": "string"
- },
- "objectPreconditions": {
- "description": "Conditions that must be met for this operation to execute.",
- "properties": {
- "ifGenerationMatch": {
- "description": "Only perform the composition if the generation of the source object that would be used matches this value. If this value and a generation are both specified, they must be the same value or the call will fail.",
- "format": "int64",
- "type": "string"
- }
- },
- "type": "object"
- }
- },
- "type": "object"
- },
- "type": "array"
- }
- },
- "type": "object"
+ "etag": {
+ "type": "string",
+ "description": "HTTP 1.1 Entity tag for the access-control entry."
},
- "Notification": {
- "description": "A subscription to receive Google PubSub notifications.",
- "id": "Notification",
- "properties": {
- "custom_attributes": {
- "additionalProperties": {
- "type": "string"
- },
- "description": "An optional list of additional attributes to attach to each Cloud PubSub message published for this notification subscription.",
- "type": "object"
- },
- "etag": {
- "description": "HTTP 1.1 Entity tag for this subscription notification.",
- "type": "string"
- },
- "event_types": {
- "description": "If present, only send notifications about listed event types. If empty, sent notifications for all event types.",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "id": {
- "description": "The ID of the notification.",
- "type": "string"
- },
- "kind": {
- "default": "storage#notification",
- "description": "The kind of item this is. For notifications, this is always storage#notification.",
- "type": "string"
- },
- "object_name_prefix": {
- "description": "If present, only apply this notification configuration to object names that begin with this prefix.",
- "type": "string"
- },
- "payload_format": {
- "annotations": {
- "required": [
- "storage.notifications.insert"
- ]
- },
- "default": "JSON_API_V1",
- "description": "The desired content of the Payload.",
- "type": "string"
- },
- "selfLink": {
- "description": "The canonical URL of this notification.",
- "type": "string"
- },
- "topic": {
- "annotations": {
- "required": [
- "storage.notifications.insert"
- ]
- },
- "description": "The Cloud PubSub topic to which this subscription publishes. Formatted as: '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}'",
- "type": "string"
- }
- },
- "type": "object"
+ "id": {
+ "type": "string",
+ "description": "The ID of the access-control entry."
},
- "Notifications": {
- "description": "A list of notification subscriptions.",
- "id": "Notifications",
- "properties": {
- "items": {
- "description": "The list of items.",
- "items": {
- "$ref": "Notification"
- },
- "type": "array"
- },
- "kind": {
- "default": "storage#notifications",
- "description": "The kind of item this is. For lists of notifications, this is always storage#notifications.",
- "type": "string"
- }
- },
- "type": "object"
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is. For bucket access control entries, this is always storage#bucketAccessControl.",
+ "default": "storage#bucketAccessControl"
},
- "Object": {
- "description": "An object.",
- "id": "Object",
- "properties": {
- "acl": {
- "annotations": {
- "required": [
- "storage.objects.update"
- ]
- },
- "description": "Access controls on the object.",
- "items": {
- "$ref": "ObjectAccessControl"
- },
- "type": "array"
- },
- "bucket": {
- "description": "The name of the bucket containing this object.",
- "type": "string"
- },
- "cacheControl": {
- "description": "Cache-Control directive for the object data. If omitted, and the object is accessible to all anonymous users, the default will be public, max-age=3600.",
- "type": "string"
- },
- "componentCount": {
- "description": "Number of underlying components that make up this object. Components are accumulated by compose operations.",
- "format": "int32",
- "type": "integer"
- },
- "contentDisposition": {
- "description": "Content-Disposition of the object data.",
- "type": "string"
- },
- "contentEncoding": {
- "description": "Content-Encoding of the object data.",
- "type": "string"
- },
- "contentLanguage": {
- "description": "Content-Language of the object data.",
- "type": "string"
- },
- "contentType": {
- "description": "Content-Type of the object data. If an object is stored without a Content-Type, it is served as application/octet-stream.",
- "type": "string"
- },
- "crc32c": {
- "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices.",
- "type": "string"
- },
- "customerEncryption": {
- "description": "Metadata of customer-supplied encryption key, if the object is encrypted by such a key.",
- "properties": {
- "encryptionAlgorithm": {
- "description": "The encryption algorithm.",
- "type": "string"
- },
- "keySha256": {
- "description": "SHA256 hash value of the encryption key.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "etag": {
- "description": "HTTP 1.1 Entity tag for the object.",
- "type": "string"
- },
- "eventBasedHold": {
- "description": "Whether an object is under event-based hold. Event-based hold is a way to retain objects until an event occurs, which is signified by the hold's release (i.e. this value is set to false). After being released (set to false), such objects will be subject to bucket-level retention (if any). One sample use case of this flag is for banks to hold loan documents for at least 3 years after loan is paid in full. Here, bucket-level retention is 3 years and the event is the loan being paid in full. In this example, these objects will be held intact for any number of years until the event has occurred (event-based hold on the object is released) and then 3 more years after that. That means retention duration of the objects begins from the moment event-based hold transitioned from true to false.",
- "type": "boolean"
- },
- "generation": {
- "description": "The content generation of this object. Used for object versioning.",
- "format": "int64",
- "type": "string"
- },
- "id": {
- "description": "The ID of the object, including the bucket name, object name, and generation number.",
- "type": "string"
- },
- "kind": {
- "default": "storage#object",
- "description": "The kind of item this is. For objects, this is always storage#object.",
- "type": "string"
- },
- "kmsKeyName": {
- "description": "Cloud KMS Key used to encrypt this object, if the object is encrypted by such a key.",
- "type": "string"
- },
- "md5Hash": {
- "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see Hashes and ETags: Best Practices.",
- "type": "string"
- },
- "mediaLink": {
- "description": "Media download link.",
- "type": "string"
- },
- "metadata": {
- "additionalProperties": {
- "description": "An individual metadata entry.",
- "type": "string"
- },
- "description": "User-provided metadata, in key/value pairs.",
- "type": "object"
- },
- "metageneration": {
- "description": "The version of the metadata for this object at this generation. Used for preconditions and for detecting changes in metadata. A metageneration number is only meaningful in the context of a particular generation of a particular object.",
- "format": "int64",
- "type": "string"
- },
- "name": {
- "description": "The name of the object. Required if not specified by URL parameter.",
- "type": "string"
- },
- "owner": {
- "description": "The owner of the object. This will always be the uploader of the object.",
- "properties": {
- "entity": {
- "description": "The entity, in the form user-userId.",
- "type": "string"
- },
- "entityId": {
- "description": "The ID for the entity.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "retentionExpirationTime": {
- "description": "A server-determined value that specifies the earliest time that the object's retention period expires. This value is in RFC 3339 format. Note 1: This field is not provided for objects with an active event-based hold, since retention expiration is unknown until the hold is removed. Note 2: This value can be provided even when temporary hold is set (so that the user can reason about policy without having to first unset the temporary hold).",
- "format": "date-time",
- "type": "string"
- },
- "selfLink": {
- "description": "The link to this object.",
- "type": "string"
- },
- "size": {
- "description": "Content-Length of the data in bytes.",
- "format": "uint64",
- "type": "string"
- },
- "storageClass": {
- "description": "Storage class of the object.",
- "type": "string"
- },
- "temporaryHold": {
- "description": "Whether an object is under temporary hold. While this flag is set to true, the object is protected against deletion and overwrites. A common use case of this flag is regulatory investigations where objects need to be retained while the investigation is ongoing. Note that unlike event-based hold, temporary hold does not impact retention expiration time of an object.",
- "type": "boolean"
- },
- "timeCreated": {
- "description": "The creation time of the object in RFC 3339 format.",
- "format": "date-time",
- "type": "string"
- },
- "timeDeleted": {
- "description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.",
- "format": "date-time",
- "type": "string"
- },
- "timeStorageClassUpdated": {
- "description": "The time at which the object's storage class was last changed. When the object is initially created, it will be set to timeCreated.",
- "format": "date-time",
- "type": "string"
- },
- "updated": {
- "description": "The modification time of the object metadata in RFC 3339 format.",
- "format": "date-time",
- "type": "string"
- }
+ "projectTeam": {
+ "type": "object",
+ "description": "The project team associated with the entity, if any.",
+ "properties": {
+ "projectNumber": {
+ "type": "string",
+ "description": "The project number."
},
- "type": "object"
+ "team": {
+ "type": "string",
+ "description": "The team."
+ }
+ }
},
- "ObjectAccessControl": {
- "description": "An access-control entry.",
- "id": "ObjectAccessControl",
- "properties": {
- "bucket": {
- "description": "The name of the bucket.",
- "type": "string"
- },
- "domain": {
- "description": "The domain associated with the entity, if any.",
- "type": "string"
- },
- "email": {
- "description": "The email address associated with the entity, if any.",
- "type": "string"
- },
- "entity": {
- "annotations": {
- "required": [
- "storage.defaultObjectAccessControls.insert",
- "storage.objectAccessControls.insert"
- ]
- },
- "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.",
- "type": "string"
- },
- "entityId": {
- "description": "The ID for the entity, if any.",
- "type": "string"
- },
- "etag": {
- "description": "HTTP 1.1 Entity tag for the access-control entry.",
- "type": "string"
- },
- "generation": {
- "description": "The content generation of the object, if applied to an object.",
- "format": "int64",
- "type": "string"
- },
- "id": {
- "description": "The ID of the access-control entry.",
- "type": "string"
- },
- "kind": {
- "default": "storage#objectAccessControl",
- "description": "The kind of item this is. For object access control entries, this is always storage#objectAccessControl.",
- "type": "string"
- },
- "object": {
- "description": "The name of the object, if applied to an object.",
- "type": "string"
- },
- "projectTeam": {
- "description": "The project team associated with the entity, if any.",
- "properties": {
- "projectNumber": {
- "description": "The project number.",
- "type": "string"
- },
- "team": {
- "description": "The team.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "role": {
- "annotations": {
- "required": [
- "storage.defaultObjectAccessControls.insert",
- "storage.objectAccessControls.insert"
- ]
- },
- "description": "The access permission for the entity.",
- "type": "string"
- },
- "selfLink": {
- "description": "The link to this access-control entry.",
- "type": "string"
- }
- },
- "type": "object"
+ "role": {
+ "type": "string",
+ "description": "The access permission for the entity.",
+ "annotations": {
+ "required": [
+ "storage.bucketAccessControls.insert"
+ ]
+ }
},
- "ObjectAccessControls": {
- "description": "An access-control list.",
- "id": "ObjectAccessControls",
- "properties": {
- "items": {
- "description": "The list of items.",
- "items": {
- "$ref": "ObjectAccessControl"
- },
- "type": "array"
- },
- "kind": {
- "default": "storage#objectAccessControls",
- "description": "The kind of item this is. For lists of object access control entries, this is always storage#objectAccessControls.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "Objects": {
- "description": "A list of objects.",
- "id": "Objects",
- "properties": {
- "items": {
- "description": "The list of items.",
- "items": {
- "$ref": "Object"
- },
- "type": "array"
- },
- "kind": {
- "default": "storage#objects",
- "description": "The kind of item this is. For lists of objects, this is always storage#objects.",
- "type": "string"
- },
- "nextPageToken": {
- "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.",
- "type": "string"
- },
- "prefixes": {
- "description": "The list of prefixes of objects matching-but-not-listed up to and including the requested delimiter.",
- "items": {
- "type": "string"
- },
- "type": "array"
- }
- },
- "type": "object"
- },
- "Policy": {
- "description": "A bucket/object IAM policy.",
- "id": "Policy",
- "properties": {
- "bindings": {
- "annotations": {
- "required": [
- "storage.buckets.setIamPolicy",
- "storage.objects.setIamPolicy"
- ]
- },
- "description": "An association between a role, which comes with a set of permissions, and members who may assume that role.",
- "items": {
- "properties": {
- "condition": {
- "type": "any"
- },
- "members": {
- "annotations": {
- "required": [
- "storage.buckets.setIamPolicy",
- "storage.objects.setIamPolicy"
- ]
- },
- "description": "A collection of identifiers for members who may assume the provided role. Recognized identifiers are as follows: \n- allUsers — A special identifier that represents anyone on the internet; with or without a Google account. \n- allAuthenticatedUsers — A special identifier that represents anyone who is authenticated with a Google account or a service account. \n- user:emailid — An email address that represents a specific account. For example, user:alice@gmail.com or user:joe@example.com. \n- serviceAccount:emailid — An email address that represents a service account. For example, serviceAccount:my-other-app@appspot.gserviceaccount.com . \n- group:emailid — An email address that represents a Google group. For example, group:admins@example.com. \n- domain:domain — A Google Apps domain name that represents all the users of that domain. For example, domain:google.com or domain:example.com. \n- projectOwner:projectid — Owners of the given project. For example, projectOwner:my-example-project \n- projectEditor:projectid — Editors of the given project. For example, projectEditor:my-example-project \n- projectViewer:projectid — Viewers of the given project. For example, projectViewer:my-example-project",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "role": {
- "annotations": {
- "required": [
- "storage.buckets.setIamPolicy",
- "storage.objects.setIamPolicy"
- ]
- },
- "description": "The role to which members belong. Two types of roles are supported: new IAM roles, which grant permissions that do not map directly to those provided by ACLs, and legacy IAM roles, which do map directly to ACL permissions. All roles are of the format roles/storage.specificRole.\nThe new IAM roles are: \n- roles/storage.admin — Full control of Google Cloud Storage resources. \n- roles/storage.objectViewer — Read-Only access to Google Cloud Storage objects. \n- roles/storage.objectCreator — Access to create objects in Google Cloud Storage. \n- roles/storage.objectAdmin — Full control of Google Cloud Storage objects. The legacy IAM roles are: \n- roles/storage.legacyObjectReader — Read-only access to objects without listing. Equivalent to an ACL entry on an object with the READER role. \n- roles/storage.legacyObjectOwner — Read/write access to existing objects without listing. Equivalent to an ACL entry on an object with the OWNER role. \n- roles/storage.legacyBucketReader — Read access to buckets with object listing. Equivalent to an ACL entry on a bucket with the READER role. \n- roles/storage.legacyBucketWriter — Read access to buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the WRITER role. \n- roles/storage.legacyBucketOwner — Read and write access to existing buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the OWNER role.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "type": "array"
- },
- "etag": {
- "description": "HTTP 1.1 Entity tag for the policy.",
- "format": "byte",
- "type": "string"
- },
- "kind": {
- "default": "storage#policy",
- "description": "The kind of item this is. For policies, this is always storage#policy. This field is ignored on input.",
- "type": "string"
- },
- "resourceId": {
- "description": "The ID of the resource to which this policy belongs. Will be of the form projects/_/buckets/bucket for buckets, and projects/_/buckets/bucket/objects/object for objects. A specific generation may be specified by appending #generationNumber to the end of the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The current generation can be denoted with #0. This field is ignored on input.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "RewriteResponse": {
- "description": "A rewrite response.",
- "id": "RewriteResponse",
- "properties": {
- "done": {
- "description": "true if the copy is finished; otherwise, false if the copy is in progress. This property is always present in the response.",
- "type": "boolean"
- },
- "kind": {
- "default": "storage#rewriteResponse",
- "description": "The kind of item this is.",
- "type": "string"
- },
- "objectSize": {
- "description": "The total size of the object being copied in bytes. This property is always present in the response.",
- "format": "int64",
- "type": "string"
- },
- "resource": {
- "$ref": "Object",
- "description": "A resource containing the metadata for the copied-to object. This property is present in the response only when copying completes."
- },
- "rewriteToken": {
- "description": "A token to use in subsequent requests to continue copying data. This token is present in the response only when there is more data to copy.",
- "type": "string"
- },
- "totalBytesRewritten": {
- "description": "The total bytes written so far, which can be used to provide a waiting user with a progress indicator. This property is always present in the response.",
- "format": "int64",
- "type": "string"
- }
- },
- "type": "object"
- },
- "ServiceAccount": {
- "description": "A subscription to receive Google PubSub notifications.",
- "id": "ServiceAccount",
- "properties": {
- "email_address": {
- "description": "The ID of the notification.",
- "type": "string"
- },
- "kind": {
- "default": "storage#serviceAccount",
- "description": "The kind of item this is. For notifications, this is always storage#notification.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "TestIamPermissionsResponse": {
- "description": "A storage.(buckets|objects).testIamPermissions response.",
- "id": "TestIamPermissionsResponse",
- "properties": {
- "kind": {
- "default": "storage#testIamPermissionsResponse",
- "description": "The kind of item this is.",
- "type": "string"
- },
- "permissions": {
- "description": "The permissions held by the caller. Permissions are always of the format storage.resource.capability, where resource is one of buckets or objects. The supported permissions are as follows: \n- storage.buckets.delete — Delete bucket. \n- storage.buckets.get — Read bucket metadata. \n- storage.buckets.getIamPolicy — Read bucket IAM policy. \n- storage.buckets.create — Create bucket. \n- storage.buckets.list — List buckets. \n- storage.buckets.setIamPolicy — Update bucket IAM policy. \n- storage.buckets.update — Update bucket metadata. \n- storage.objects.delete — Delete object. \n- storage.objects.get — Read object data and metadata. \n- storage.objects.getIamPolicy — Read object IAM policy. \n- storage.objects.create — Create object. \n- storage.objects.list — List objects. \n- storage.objects.setIamPolicy — Update object IAM policy. \n- storage.objects.update — Update object metadata.",
- "items": {
- "type": "string"
- },
- "type": "array"
- }
- },
- "type": "object"
+ "selfLink": {
+ "type": "string",
+ "description": "The link to this access-control entry."
}
+ }
},
- "servicePath": "storage/v1/",
- "title": "Cloud Storage JSON API",
- "version": "v1"
-}
\ No newline at end of file
+ "BucketAccessControls": {
+ "id": "BucketAccessControls",
+ "type": "object",
+ "description": "An access-control list.",
+ "properties": {
+ "items": {
+ "type": "array",
+ "description": "The list of items.",
+ "items": {
+ "$ref": "BucketAccessControl"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is. For lists of bucket access control entries, this is always storage#bucketAccessControls.",
+ "default": "storage#bucketAccessControls"
+ }
+ }
+ },
+ "Buckets": {
+ "id": "Buckets",
+ "type": "object",
+ "description": "A list of buckets.",
+ "properties": {
+ "items": {
+ "type": "array",
+ "description": "The list of items.",
+ "items": {
+ "$ref": "Bucket"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is. For lists of buckets, this is always storage#buckets.",
+ "default": "storage#buckets"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results."
+ }
+ }
+ },
+ "Channel": {
+ "id": "Channel",
+ "type": "object",
+ "description": "An notification channel used to watch for resource changes.",
+ "properties": {
+ "address": {
+ "type": "string",
+ "description": "The address where notifications are delivered for this channel."
+ },
+ "expiration": {
+ "type": "string",
+ "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.",
+ "format": "int64"
+ },
+ "id": {
+ "type": "string",
+ "description": "A UUID or similar unique string that identifies this channel."
+ },
+ "kind": {
+ "type": "string",
+ "description": "Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string \"api#channel\".",
+ "default": "api#channel"
+ },
+ "params": {
+ "type": "object",
+ "description": "Additional parameters controlling delivery channel behavior. Optional.",
+ "additionalProperties": {
+ "type": "string",
+ "description": "Declares a new parameter by name."
+ }
+ },
+ "payload": {
+ "type": "boolean",
+ "description": "A Boolean value to indicate whether payload is wanted. Optional."
+ },
+ "resourceId": {
+ "type": "string",
+ "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions."
+ },
+ "resourceUri": {
+ "type": "string",
+ "description": "A version-specific identifier for the watched resource."
+ },
+ "token": {
+ "type": "string",
+ "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional."
+ },
+ "type": {
+ "type": "string",
+ "description": "The type of delivery mechanism used for this channel."
+ }
+ }
+ },
+ "ComposeRequest": {
+ "id": "ComposeRequest",
+ "type": "object",
+ "description": "A Compose request.",
+ "properties": {
+ "destination": {
+ "$ref": "Object",
+ "description": "Properties of the resulting object."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is.",
+ "default": "storage#composeRequest"
+ },
+ "sourceObjects": {
+ "type": "array",
+ "description": "The list of source objects that will be concatenated into a single object.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "generation": {
+ "type": "string",
+ "description": "The generation of this object to use as the source.",
+ "format": "int64"
+ },
+ "name": {
+ "type": "string",
+ "description": "The source object's name. The source object's bucket is implicitly the destination bucket.",
+ "annotations": {
+ "required": [
+ "storage.objects.compose"
+ ]
+ }
+ },
+ "objectPreconditions": {
+ "type": "object",
+ "description": "Conditions that must be met for this operation to execute.",
+ "properties": {
+ "ifGenerationMatch": {
+ "type": "string",
+ "description": "Only perform the composition if the generation of the source object that would be used matches this value. If this value and a generation are both specified, they must be the same value or the call will fail.",
+ "format": "int64"
+ }
+ }
+ }
+ }
+ },
+ "annotations": {
+ "required": [
+ "storage.objects.compose"
+ ]
+ }
+ }
+ }
+ },
+ "Notification": {
+ "id": "Notification",
+ "type": "object",
+ "description": "A subscription to receive Google PubSub notifications.",
+ "properties": {
+ "custom_attributes": {
+ "type": "object",
+ "description": "An optional list of additional attributes to attach to each Cloud PubSub message published for this notification subscription.",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "etag": {
+ "type": "string",
+ "description": "HTTP 1.1 Entity tag for this subscription notification."
+ },
+ "event_types": {
+ "type": "array",
+ "description": "If present, only send notifications about listed event types. If empty, sent notifications for all event types.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "id": {
+ "type": "string",
+ "description": "The ID of the notification."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is. For notifications, this is always storage#notification.",
+ "default": "storage#notification"
+ },
+ "object_name_prefix": {
+ "type": "string",
+ "description": "If present, only apply this notification configuration to object names that begin with this prefix."
+ },
+ "payload_format": {
+ "type": "string",
+ "description": "The desired content of the Payload.",
+ "default": "JSON_API_V1"
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "The canonical URL of this notification."
+ },
+ "topic": {
+ "type": "string",
+ "description": "The Cloud PubSub topic to which this subscription publishes. Formatted as: '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}'",
+ "annotations": {
+ "required": [
+ "storage.notifications.insert"
+ ]
+ }
+ }
+ }
+ },
+ "Notifications": {
+ "id": "Notifications",
+ "type": "object",
+ "description": "A list of notification subscriptions.",
+ "properties": {
+ "items": {
+ "type": "array",
+ "description": "The list of items.",
+ "items": {
+ "$ref": "Notification"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is. For lists of notifications, this is always storage#notifications.",
+ "default": "storage#notifications"
+ }
+ }
+ },
+ "Object": {
+ "id": "Object",
+ "type": "object",
+ "description": "An object.",
+ "properties": {
+ "acl": {
+ "type": "array",
+ "description": "Access controls on the object.",
+ "items": {
+ "$ref": "ObjectAccessControl"
+ },
+ "annotations": {
+ "required": [
+ "storage.objects.update"
+ ]
+ }
+ },
+ "bucket": {
+ "type": "string",
+ "description": "The name of the bucket containing this object."
+ },
+ "cacheControl": {
+ "type": "string",
+ "description": "Cache-Control directive for the object data. If omitted, and the object is accessible to all anonymous users, the default will be public, max-age=3600."
+ },
+ "componentCount": {
+ "type": "integer",
+ "description": "Number of underlying components that make up this object. Components are accumulated by compose operations.",
+ "format": "int32"
+ },
+ "contentDisposition": {
+ "type": "string",
+ "description": "Content-Disposition of the object data."
+ },
+ "contentEncoding": {
+ "type": "string",
+ "description": "Content-Encoding of the object data."
+ },
+ "contentLanguage": {
+ "type": "string",
+ "description": "Content-Language of the object data."
+ },
+ "contentType": {
+ "type": "string",
+ "description": "Content-Type of the object data. If an object is stored without a Content-Type, it is served as application/octet-stream."
+ },
+ "crc32c": {
+ "type": "string",
+ "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices."
+ },
+ "customerEncryption": {
+ "type": "object",
+ "description": "Metadata of customer-supplied encryption key, if the object is encrypted by such a key.",
+ "properties": {
+ "encryptionAlgorithm": {
+ "type": "string",
+ "description": "The encryption algorithm."
+ },
+ "keySha256": {
+ "type": "string",
+ "description": "SHA256 hash value of the encryption key."
+ }
+ }
+ },
+ "etag": {
+ "type": "string",
+ "description": "HTTP 1.1 Entity tag for the object."
+ },
+ "generation": {
+ "type": "string",
+ "description": "The content generation of this object. Used for object versioning.",
+ "format": "int64"
+ },
+ "id": {
+ "type": "string",
+ "description": "The ID of the object, including the bucket name, object name, and generation number."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is. For objects, this is always storage#object.",
+ "default": "storage#object"
+ },
+ "kmsKeyName": {
+ "type": "string",
+ "description": "Cloud KMS Key used to encrypt this object, if the object is encrypted by such a key."
+ },
+ "md5Hash": {
+ "type": "string",
+ "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see Hashes and ETags: Best Practices."
+ },
+ "mediaLink": {
+ "type": "string",
+ "description": "Media download link."
+ },
+ "metadata": {
+ "type": "object",
+ "description": "User-provided metadata, in key/value pairs.",
+ "additionalProperties": {
+ "type": "string",
+ "description": "An individual metadata entry."
+ }
+ },
+ "metageneration": {
+ "type": "string",
+ "description": "The version of the metadata for this object at this generation. Used for preconditions and for detecting changes in metadata. A metageneration number is only meaningful in the context of a particular generation of a particular object.",
+ "format": "int64"
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the object. Required if not specified by URL parameter."
+ },
+ "owner": {
+ "type": "object",
+ "description": "The owner of the object. This will always be the uploader of the object.",
+ "properties": {
+ "entity": {
+ "type": "string",
+ "description": "The entity, in the form user-userId."
+ },
+ "entityId": {
+ "type": "string",
+ "description": "The ID for the entity."
+ }
+ }
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "The link to this object."
+ },
+ "size": {
+ "type": "string",
+ "description": "Content-Length of the data in bytes.",
+ "format": "uint64"
+ },
+ "storageClass": {
+ "type": "string",
+ "description": "Storage class of the object."
+ },
+ "timeCreated": {
+ "type": "string",
+ "description": "The creation time of the object in RFC 3339 format.",
+ "format": "date-time"
+ },
+ "timeDeleted": {
+ "type": "string",
+ "description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.",
+ "format": "date-time"
+ },
+ "timeStorageClassUpdated": {
+ "type": "string",
+ "description": "The time at which the object's storage class was last changed. When the object is initially created, it will be set to timeCreated.",
+ "format": "date-time"
+ },
+ "updated": {
+ "type": "string",
+ "description": "The modification time of the object metadata in RFC 3339 format.",
+ "format": "date-time"
+ }
+ }
+ },
+ "ObjectAccessControl": {
+ "id": "ObjectAccessControl",
+ "type": "object",
+ "description": "An access-control entry.",
+ "properties": {
+ "bucket": {
+ "type": "string",
+ "description": "The name of the bucket."
+ },
+ "domain": {
+ "type": "string",
+ "description": "The domain associated with the entity, if any."
+ },
+ "email": {
+ "type": "string",
+ "description": "The email address associated with the entity, if any."
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.",
+ "annotations": {
+ "required": [
+ "storage.defaultObjectAccessControls.insert",
+ "storage.objectAccessControls.insert"
+ ]
+ }
+ },
+ "entityId": {
+ "type": "string",
+ "description": "The ID for the entity, if any."
+ },
+ "etag": {
+ "type": "string",
+ "description": "HTTP 1.1 Entity tag for the access-control entry."
+ },
+ "generation": {
+ "type": "string",
+ "description": "The content generation of the object, if applied to an object.",
+ "format": "int64"
+ },
+ "id": {
+ "type": "string",
+ "description": "The ID of the access-control entry."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is. For object access control entries, this is always storage#objectAccessControl.",
+ "default": "storage#objectAccessControl"
+ },
+ "object": {
+ "type": "string",
+ "description": "The name of the object, if applied to an object."
+ },
+ "projectTeam": {
+ "type": "object",
+ "description": "The project team associated with the entity, if any.",
+ "properties": {
+ "projectNumber": {
+ "type": "string",
+ "description": "The project number."
+ },
+ "team": {
+ "type": "string",
+ "description": "The team."
+ }
+ }
+ },
+ "role": {
+ "type": "string",
+ "description": "The access permission for the entity.",
+ "annotations": {
+ "required": [
+ "storage.defaultObjectAccessControls.insert",
+ "storage.objectAccessControls.insert"
+ ]
+ }
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "The link to this access-control entry."
+ }
+ }
+ },
+ "ObjectAccessControls": {
+ "id": "ObjectAccessControls",
+ "type": "object",
+ "description": "An access-control list.",
+ "properties": {
+ "items": {
+ "type": "array",
+ "description": "The list of items.",
+ "items": {
+ "$ref": "ObjectAccessControl"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is. For lists of object access control entries, this is always storage#objectAccessControls.",
+ "default": "storage#objectAccessControls"
+ }
+ }
+ },
+ "Objects": {
+ "id": "Objects",
+ "type": "object",
+ "description": "A list of objects.",
+ "properties": {
+ "items": {
+ "type": "array",
+ "description": "The list of items.",
+ "items": {
+ "$ref": "Object"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is. For lists of objects, this is always storage#objects.",
+ "default": "storage#objects"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results."
+ },
+ "prefixes": {
+ "type": "array",
+ "description": "The list of prefixes of objects matching-but-not-listed up to and including the requested delimiter.",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "Policy": {
+ "id": "Policy",
+ "type": "object",
+ "description": "A bucket/object IAM policy.",
+ "properties": {
+ "bindings": {
+ "type": "array",
+ "description": "An association between a role, which comes with a set of permissions, and members who may assume that role.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "condition": {
+ "type": "any"
+ },
+ "members": {
+ "type": "array",
+ "description": "A collection of identifiers for members who may assume the provided role. Recognized identifiers are as follows: \n- allUsers — A special identifier that represents anyone on the internet; with or without a Google account. \n- allAuthenticatedUsers — A special identifier that represents anyone who is authenticated with a Google account or a service account. \n- user:emailid — An email address that represents a specific account. For example, user:alice@gmail.com or user:joe@example.com. \n- serviceAccount:emailid — An email address that represents a service account. For example, serviceAccount:my-other-app@appspot.gserviceaccount.com . \n- group:emailid — An email address that represents a Google group. For example, group:admins@example.com. \n- domain:domain — A Google Apps domain name that represents all the users of that domain. For example, domain:google.com or domain:example.com. \n- projectOwner:projectid — Owners of the given project. For example, projectOwner:my-example-project \n- projectEditor:projectid — Editors of the given project. For example, projectEditor:my-example-project \n- projectViewer:projectid — Viewers of the given project. For example, projectViewer:my-example-project",
+ "items": {
+ "type": "string"
+ },
+ "annotations": {
+ "required": [
+ "storage.buckets.setIamPolicy",
+ "storage.objects.setIamPolicy"
+ ]
+ }
+ },
+ "role": {
+ "type": "string",
+ "description": "The role to which members belong. Two types of roles are supported: new IAM roles, which grant permissions that do not map directly to those provided by ACLs, and legacy IAM roles, which do map directly to ACL permissions. All roles are of the format roles/storage.specificRole.\nThe new IAM roles are: \n- roles/storage.admin — Full control of Google Cloud Storage resources. \n- roles/storage.objectViewer — Read-Only access to Google Cloud Storage objects. \n- roles/storage.objectCreator — Access to create objects in Google Cloud Storage. \n- roles/storage.objectAdmin — Full control of Google Cloud Storage objects. The legacy IAM roles are: \n- roles/storage.legacyObjectReader — Read-only access to objects without listing. Equivalent to an ACL entry on an object with the READER role. \n- roles/storage.legacyObjectOwner — Read/write access to existing objects without listing. Equivalent to an ACL entry on an object with the OWNER role. \n- roles/storage.legacyBucketReader — Read access to buckets with object listing. Equivalent to an ACL entry on a bucket with the READER role. \n- roles/storage.legacyBucketWriter — Read access to buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the WRITER role. \n- roles/storage.legacyBucketOwner — Read and write access to existing buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the OWNER role.",
+ "annotations": {
+ "required": [
+ "storage.buckets.setIamPolicy",
+ "storage.objects.setIamPolicy"
+ ]
+ }
+ }
+ }
+ },
+ "annotations": {
+ "required": [
+ "storage.buckets.setIamPolicy",
+ "storage.objects.setIamPolicy"
+ ]
+ }
+ },
+ "etag": {
+ "type": "string",
+ "description": "HTTP 1.1 Entity tag for the policy.",
+ "format": "byte"
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is. For policies, this is always storage#policy. This field is ignored on input.",
+ "default": "storage#policy"
+ },
+ "resourceId": {
+ "type": "string",
+ "description": "The ID of the resource to which this policy belongs. Will be of the form projects/_/buckets/bucket for buckets, and projects/_/buckets/bucket/objects/object for objects. A specific generation may be specified by appending #generationNumber to the end of the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The current generation can be denoted with #0. This field is ignored on input."
+ }
+ }
+ },
+ "RewriteResponse": {
+ "id": "RewriteResponse",
+ "type": "object",
+ "description": "A rewrite response.",
+ "properties": {
+ "done": {
+ "type": "boolean",
+ "description": "true if the copy is finished; otherwise, false if the copy is in progress. This property is always present in the response."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is.",
+ "default": "storage#rewriteResponse"
+ },
+ "objectSize": {
+ "type": "string",
+ "description": "The total size of the object being copied in bytes. This property is always present in the response.",
+ "format": "int64"
+ },
+ "resource": {
+ "$ref": "Object",
+ "description": "A resource containing the metadata for the copied-to object. This property is present in the response only when copying completes."
+ },
+ "rewriteToken": {
+ "type": "string",
+ "description": "A token to use in subsequent requests to continue copying data. This token is present in the response only when there is more data to copy."
+ },
+ "totalBytesRewritten": {
+ "type": "string",
+ "description": "The total bytes written so far, which can be used to provide a waiting user with a progress indicator. This property is always present in the response.",
+ "format": "int64"
+ }
+ }
+ },
+ "ServiceAccount": {
+ "id": "ServiceAccount",
+ "type": "object",
+ "description": "A subscription to receive Google PubSub notifications.",
+ "properties": {
+ "email_address": {
+ "type": "string",
+ "description": "The ID of the notification."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is. For notifications, this is always storage#notification.",
+ "default": "storage#serviceAccount"
+ }
+ }
+ },
+ "TestIamPermissionsResponse": {
+ "id": "TestIamPermissionsResponse",
+ "type": "object",
+ "description": "A storage.(buckets|objects).testIamPermissions response.",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is.",
+ "default": "storage#testIamPermissionsResponse"
+ },
+ "permissions": {
+ "type": "array",
+ "description": "The permissions held by the caller. Permissions are always of the format storage.resource.capability, where resource is one of buckets or objects. The supported permissions are as follows: \n- storage.buckets.delete — Delete bucket. \n- storage.buckets.get — Read bucket metadata. \n- storage.buckets.getIamPolicy — Read bucket IAM policy. \n- storage.buckets.create — Create bucket. \n- storage.buckets.list — List buckets. \n- storage.buckets.setIamPolicy — Update bucket IAM policy. \n- storage.buckets.update — Update bucket metadata. \n- storage.objects.delete — Delete object. \n- storage.objects.get — Read object data and metadata. \n- storage.objects.getIamPolicy — Read object IAM policy. \n- storage.objects.create — Create object. \n- storage.objects.list — List objects. \n- storage.objects.setIamPolicy — Update object IAM policy. \n- storage.objects.update — Update object metadata.",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "resources": {
+ "bucketAccessControls": {
+ "methods": {
+ "delete": {
+ "id": "storage.bucketAccessControls.delete",
+ "path": "b/{bucket}/acl/{entity}",
+ "httpMethod": "DELETE",
+ "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "entity"
+ ],
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "get": {
+ "id": "storage.bucketAccessControls.get",
+ "path": "b/{bucket}/acl/{entity}",
+ "httpMethod": "GET",
+ "description": "Returns the ACL entry for the specified entity on the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "entity"
+ ],
+ "response": {
+ "$ref": "BucketAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "insert": {
+ "id": "storage.bucketAccessControls.insert",
+ "path": "b/{bucket}/acl",
+ "httpMethod": "POST",
+ "description": "Creates a new ACL entry on the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "request": {
+ "$ref": "BucketAccessControl"
+ },
+ "response": {
+ "$ref": "BucketAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "list": {
+ "id": "storage.bucketAccessControls.list",
+ "path": "b/{bucket}/acl",
+ "httpMethod": "GET",
+ "description": "Retrieves ACL entries on the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "response": {
+ "$ref": "BucketAccessControls"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "patch": {
+ "id": "storage.bucketAccessControls.patch",
+ "path": "b/{bucket}/acl/{entity}",
+ "httpMethod": "PATCH",
+ "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "entity"
+ ],
+ "request": {
+ "$ref": "BucketAccessControl"
+ },
+ "response": {
+ "$ref": "BucketAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "update": {
+ "id": "storage.bucketAccessControls.update",
+ "path": "b/{bucket}/acl/{entity}",
+ "httpMethod": "PUT",
+ "description": "Updates an ACL entry on the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "entity"
+ ],
+ "request": {
+ "$ref": "BucketAccessControl"
+ },
+ "response": {
+ "$ref": "BucketAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ }
+ }
+ },
+ "buckets": {
+ "methods": {
+ "delete": {
+ "id": "storage.buckets.delete",
+ "path": "b/{bucket}",
+ "httpMethod": "DELETE",
+ "description": "Permanently deletes an empty bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "If set, only deletes the bucket if its metageneration matches this value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "If set, only deletes the bucket if its metageneration does not match this value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "get": {
+ "id": "storage.buckets.get",
+ "path": "b/{bucket}",
+ "httpMethod": "GET",
+ "description": "Returns metadata for the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to noAcl.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit owner, acl and defaultObjectAcl properties."
+ ],
+ "location": "query"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "response": {
+ "$ref": "Bucket"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "getIamPolicy": {
+ "id": "storage.buckets.getIamPolicy",
+ "path": "b/{bucket}/iam",
+ "httpMethod": "GET",
+ "description": "Returns an IAM policy for the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "response": {
+ "$ref": "Policy"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "insert": {
+ "id": "storage.buckets.insert",
+ "path": "b",
+ "httpMethod": "POST",
+ "description": "Creates a new bucket.",
+ "parameters": {
+ "predefinedAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of access controls to this bucket.",
+ "enum": [
+ "authenticatedRead",
+ "private",
+ "projectPrivate",
+ "publicRead",
+ "publicReadWrite"
+ ],
+ "enumDescriptions": [
+ "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
+ "Project team owners get OWNER access.",
+ "Project team members get access according to their roles.",
+ "Project team owners get OWNER access, and allUsers get READER access.",
+ "Project team owners get OWNER access, and allUsers get WRITER access."
+ ],
+ "location": "query"
+ },
+ "predefinedDefaultObjectAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of default object access controls to this bucket.",
+ "enum": [
+ "authenticatedRead",
+ "bucketOwnerFullControl",
+ "bucketOwnerRead",
+ "private",
+ "projectPrivate",
+ "publicRead"
+ ],
+ "enumDescriptions": [
+ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ "Object owner gets OWNER access, and project team owners get OWNER access.",
+ "Object owner gets OWNER access, and project team owners get READER access.",
+ "Object owner gets OWNER access.",
+ "Object owner gets OWNER access, and project team members get access according to their roles.",
+ "Object owner gets OWNER access, and allUsers get READER access."
+ ],
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "A valid API project identifier.",
+ "required": true,
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit owner, acl and defaultObjectAcl properties."
+ ],
+ "location": "query"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "request": {
+ "$ref": "Bucket"
+ },
+ "response": {
+ "$ref": "Bucket"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "list": {
+ "id": "storage.buckets.list",
+ "path": "b",
+ "httpMethod": "GET",
+ "description": "Retrieves a list of buckets for a given project.",
+ "parameters": {
+ "maxResults": {
+ "type": "integer",
+ "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.",
+ "default": "1000",
+ "format": "uint32",
+ "minimum": "0",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "A previously-returned page token representing part of the larger set of results to view.",
+ "location": "query"
+ },
+ "prefix": {
+ "type": "string",
+ "description": "Filter results to buckets whose names begin with this prefix.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "A valid API project identifier.",
+ "required": true,
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to noAcl.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit owner, acl and defaultObjectAcl properties."
+ ],
+ "location": "query"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "Buckets"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "patch": {
+ "id": "storage.buckets.patch",
+ "path": "b/{bucket}",
+ "httpMethod": "PATCH",
+ "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate. This method supports patch semantics.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "predefinedAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of access controls to this bucket.",
+ "enum": [
+ "authenticatedRead",
+ "private",
+ "projectPrivate",
+ "publicRead",
+ "publicReadWrite"
+ ],
+ "enumDescriptions": [
+ "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
+ "Project team owners get OWNER access.",
+ "Project team members get access according to their roles.",
+ "Project team owners get OWNER access, and allUsers get READER access.",
+ "Project team owners get OWNER access, and allUsers get WRITER access."
+ ],
+ "location": "query"
+ },
+ "predefinedDefaultObjectAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of default object access controls to this bucket.",
+ "enum": [
+ "authenticatedRead",
+ "bucketOwnerFullControl",
+ "bucketOwnerRead",
+ "private",
+ "projectPrivate",
+ "publicRead"
+ ],
+ "enumDescriptions": [
+ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ "Object owner gets OWNER access, and project team owners get OWNER access.",
+ "Object owner gets OWNER access, and project team owners get READER access.",
+ "Object owner gets OWNER access.",
+ "Object owner gets OWNER access, and project team members get access according to their roles.",
+ "Object owner gets OWNER access, and allUsers get READER access."
+ ],
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to full.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit owner, acl and defaultObjectAcl properties."
+ ],
+ "location": "query"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "request": {
+ "$ref": "Bucket"
+ },
+ "response": {
+ "$ref": "Bucket"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "setIamPolicy": {
+ "id": "storage.buckets.setIamPolicy",
+ "path": "b/{bucket}/iam",
+ "httpMethod": "PUT",
+ "description": "Updates an IAM policy for the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "request": {
+ "$ref": "Policy"
+ },
+ "response": {
+ "$ref": "Policy"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "testIamPermissions": {
+ "id": "storage.buckets.testIamPermissions",
+ "path": "b/{bucket}/iam/testPermissions",
+ "httpMethod": "GET",
+ "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "permissions": {
+ "type": "string",
+ "description": "Permissions to test.",
+ "required": true,
+ "repeated": true,
+ "location": "query"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "permissions"
+ ],
+ "response": {
+ "$ref": "TestIamPermissionsResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "update": {
+ "id": "storage.buckets.update",
+ "path": "b/{bucket}",
+ "httpMethod": "PUT",
+ "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "predefinedAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of access controls to this bucket.",
+ "enum": [
+ "authenticatedRead",
+ "private",
+ "projectPrivate",
+ "publicRead",
+ "publicReadWrite"
+ ],
+ "enumDescriptions": [
+ "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
+ "Project team owners get OWNER access.",
+ "Project team members get access according to their roles.",
+ "Project team owners get OWNER access, and allUsers get READER access.",
+ "Project team owners get OWNER access, and allUsers get WRITER access."
+ ],
+ "location": "query"
+ },
+ "predefinedDefaultObjectAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of default object access controls to this bucket.",
+ "enum": [
+ "authenticatedRead",
+ "bucketOwnerFullControl",
+ "bucketOwnerRead",
+ "private",
+ "projectPrivate",
+ "publicRead"
+ ],
+ "enumDescriptions": [
+ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ "Object owner gets OWNER access, and project team owners get OWNER access.",
+ "Object owner gets OWNER access, and project team owners get READER access.",
+ "Object owner gets OWNER access.",
+ "Object owner gets OWNER access, and project team members get access according to their roles.",
+ "Object owner gets OWNER access, and allUsers get READER access."
+ ],
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to full.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit owner, acl and defaultObjectAcl properties."
+ ],
+ "location": "query"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "request": {
+ "$ref": "Bucket"
+ },
+ "response": {
+ "$ref": "Bucket"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ }
+ }
+ },
+ "channels": {
+ "methods": {
+ "stop": {
+ "id": "storage.channels.stop",
+ "path": "channels/stop",
+ "httpMethod": "POST",
+ "description": "Stop watching resources through this channel",
+ "request": {
+ "$ref": "Channel",
+ "parameterName": "resource"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ }
+ }
+ },
+ "defaultObjectAccessControls": {
+ "methods": {
+ "delete": {
+ "id": "storage.defaultObjectAccessControls.delete",
+ "path": "b/{bucket}/defaultObjectAcl/{entity}",
+ "httpMethod": "DELETE",
+ "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "entity"
+ ],
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "get": {
+ "id": "storage.defaultObjectAccessControls.get",
+ "path": "b/{bucket}/defaultObjectAcl/{entity}",
+ "httpMethod": "GET",
+ "description": "Returns the default object ACL entry for the specified entity on the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "entity"
+ ],
+ "response": {
+ "$ref": "ObjectAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "insert": {
+ "id": "storage.defaultObjectAccessControls.insert",
+ "path": "b/{bucket}/defaultObjectAcl",
+ "httpMethod": "POST",
+ "description": "Creates a new default object ACL entry on the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "request": {
+ "$ref": "ObjectAccessControl"
+ },
+ "response": {
+ "$ref": "ObjectAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "list": {
+ "id": "storage.defaultObjectAccessControls.list",
+ "path": "b/{bucket}/defaultObjectAcl",
+ "httpMethod": "GET",
+ "description": "Retrieves default object ACL entries on the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "response": {
+ "$ref": "ObjectAccessControls"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "patch": {
+ "id": "storage.defaultObjectAccessControls.patch",
+ "path": "b/{bucket}/defaultObjectAcl/{entity}",
+ "httpMethod": "PATCH",
+ "description": "Updates a default object ACL entry on the specified bucket. This method supports patch semantics.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "entity"
+ ],
+ "request": {
+ "$ref": "ObjectAccessControl"
+ },
+ "response": {
+ "$ref": "ObjectAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "update": {
+ "id": "storage.defaultObjectAccessControls.update",
+ "path": "b/{bucket}/defaultObjectAcl/{entity}",
+ "httpMethod": "PUT",
+ "description": "Updates a default object ACL entry on the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "entity"
+ ],
+ "request": {
+ "$ref": "ObjectAccessControl"
+ },
+ "response": {
+ "$ref": "ObjectAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ }
+ }
+ },
+ "notifications": {
+ "methods": {
+ "delete": {
+ "id": "storage.notifications.delete",
+ "path": "b/{bucket}/notificationConfigs/{notification}",
+ "httpMethod": "DELETE",
+ "description": "Permanently deletes a notification subscription.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "The parent bucket of the notification.",
+ "required": true,
+ "location": "path"
+ },
+ "notification": {
+ "type": "string",
+ "description": "ID of the notification to delete.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "notification"
+ ],
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "get": {
+ "id": "storage.notifications.get",
+ "path": "b/{bucket}/notificationConfigs/{notification}",
+ "httpMethod": "GET",
+ "description": "View a notification configuration.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "The parent bucket of the notification.",
+ "required": true,
+ "location": "path"
+ },
+ "notification": {
+ "type": "string",
+ "description": "Notification ID",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "notification"
+ ],
+ "response": {
+ "$ref": "Notification"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "insert": {
+ "id": "storage.notifications.insert",
+ "path": "b/{bucket}/notificationConfigs",
+ "httpMethod": "POST",
+ "description": "Creates a notification subscription for a given bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "The parent bucket of the notification.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "request": {
+ "$ref": "Notification"
+ },
+ "response": {
+ "$ref": "Notification"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "list": {
+ "id": "storage.notifications.list",
+ "path": "b/{bucket}/notificationConfigs",
+ "httpMethod": "GET",
+ "description": "Retrieves a list of notification subscriptions for a given bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a Google Cloud Storage bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "response": {
+ "$ref": "Notifications"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ }
+ }
+ },
+ "objectAccessControls": {
+ "methods": {
+ "delete": {
+ "id": "storage.objectAccessControls.delete",
+ "path": "b/{bucket}/o/{object}/acl/{entity}",
+ "httpMethod": "DELETE",
+ "description": "Permanently deletes the ACL entry for the specified entity on the specified object.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object",
+ "entity"
+ ],
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "get": {
+ "id": "storage.objectAccessControls.get",
+ "path": "b/{bucket}/o/{object}/acl/{entity}",
+ "httpMethod": "GET",
+ "description": "Returns the ACL entry for the specified entity on the specified object.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object",
+ "entity"
+ ],
+ "response": {
+ "$ref": "ObjectAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "insert": {
+ "id": "storage.objectAccessControls.insert",
+ "path": "b/{bucket}/o/{object}/acl",
+ "httpMethod": "POST",
+ "description": "Creates a new ACL entry on the specified object.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object"
+ ],
+ "request": {
+ "$ref": "ObjectAccessControl"
+ },
+ "response": {
+ "$ref": "ObjectAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "list": {
+ "id": "storage.objectAccessControls.list",
+ "path": "b/{bucket}/o/{object}/acl",
+ "httpMethod": "GET",
+ "description": "Retrieves ACL entries on the specified object.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object"
+ ],
+ "response": {
+ "$ref": "ObjectAccessControls"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "patch": {
+ "id": "storage.objectAccessControls.patch",
+ "path": "b/{bucket}/o/{object}/acl/{entity}",
+ "httpMethod": "PATCH",
+ "description": "Updates an ACL entry on the specified object. This method supports patch semantics.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object",
+ "entity"
+ ],
+ "request": {
+ "$ref": "ObjectAccessControl"
+ },
+ "response": {
+ "$ref": "ObjectAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "update": {
+ "id": "storage.objectAccessControls.update",
+ "path": "b/{bucket}/o/{object}/acl/{entity}",
+ "httpMethod": "PUT",
+ "description": "Updates an ACL entry on the specified object.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object",
+ "entity"
+ ],
+ "request": {
+ "$ref": "ObjectAccessControl"
+ },
+ "response": {
+ "$ref": "ObjectAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ }
+ }
+ },
+ "objects": {
+ "methods": {
+ "compose": {
+ "id": "storage.objects.compose",
+ "path": "b/{destinationBucket}/o/{destinationObject}/compose",
+ "httpMethod": "POST",
+ "description": "Concatenates a list of existing objects into a new object in the same bucket.",
+ "parameters": {
+ "destinationBucket": {
+ "type": "string",
+ "description": "Name of the bucket in which to store the new object.",
+ "required": true,
+ "location": "path"
+ },
+ "destinationObject": {
+ "type": "string",
+ "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "destinationPredefinedAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of access controls to the destination object.",
+ "enum": [
+ "authenticatedRead",
+ "bucketOwnerFullControl",
+ "bucketOwnerRead",
+ "private",
+ "projectPrivate",
+ "publicRead"
+ ],
+ "enumDescriptions": [
+ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ "Object owner gets OWNER access, and project team owners get OWNER access.",
+ "Object owner gets OWNER access, and project team owners get READER access.",
+ "Object owner gets OWNER access.",
+ "Object owner gets OWNER access, and project team members get access according to their roles.",
+ "Object owner gets OWNER access, and allUsers get READER access."
+ ],
+ "location": "query"
+ },
+ "ifGenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "kmsKeyName": {
+ "type": "string",
+ "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.",
+ "location": "query"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "destinationBucket",
+ "destinationObject"
+ ],
+ "request": {
+ "$ref": "ComposeRequest"
+ },
+ "response": {
+ "$ref": "Object"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ],
+ "supportsMediaDownload": true,
+ "useMediaDownloadService": true
+ },
+ "copy": {
+ "id": "storage.objects.copy",
+ "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}",
+ "httpMethod": "POST",
+ "description": "Copies a source object to a destination object. Optionally overrides metadata.",
+ "parameters": {
+ "destinationBucket": {
+ "type": "string",
+ "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "destinationObject": {
+ "type": "string",
+ "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.",
+ "required": true,
+ "location": "path"
+ },
+ "destinationPredefinedAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of access controls to the destination object.",
+ "enum": [
+ "authenticatedRead",
+ "bucketOwnerFullControl",
+ "bucketOwnerRead",
+ "private",
+ "projectPrivate",
+ "publicRead"
+ ],
+ "enumDescriptions": [
+ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ "Object owner gets OWNER access, and project team owners get OWNER access.",
+ "Object owner gets OWNER access, and project team owners get READER access.",
+ "Object owner gets OWNER access.",
+ "Object owner gets OWNER access, and project team members get access according to their roles.",
+ "Object owner gets OWNER access, and allUsers get READER access."
+ ],
+ "location": "query"
+ },
+ "ifGenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifSourceGenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the source object's current generation matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifSourceGenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifSourceMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifSourceMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit the owner, acl property."
+ ],
+ "location": "query"
+ },
+ "sourceBucket": {
+ "type": "string",
+ "description": "Name of the bucket in which to find the source object.",
+ "required": true,
+ "location": "path"
+ },
+ "sourceGeneration": {
+ "type": "string",
+ "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "sourceObject": {
+ "type": "string",
+ "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "sourceBucket",
+ "sourceObject",
+ "destinationBucket",
+ "destinationObject"
+ ],
+ "request": {
+ "$ref": "Object"
+ },
+ "response": {
+ "$ref": "Object"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ],
+ "supportsMediaDownload": true,
+ "useMediaDownloadService": true
+ },
+ "delete": {
+ "id": "storage.objects.delete",
+ "path": "b/{bucket}/o/{object}",
+ "httpMethod": "DELETE",
+ "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of the bucket in which the object resides.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object"
+ ],
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "get": {
+ "id": "storage.objects.get",
+ "path": "b/{bucket}/o/{object}",
+ "httpMethod": "GET",
+ "description": "Retrieves an object or its metadata.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of the bucket in which the object resides.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to noAcl.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit the owner, acl property."
+ ],
+ "location": "query"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object"
+ ],
+ "response": {
+ "$ref": "Object"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ],
+ "supportsMediaDownload": true,
+ "useMediaDownloadService": true
+ },
+ "getIamPolicy": {
+ "id": "storage.objects.getIamPolicy",
+ "path": "b/{bucket}/o/{object}/iam",
+ "httpMethod": "GET",
+ "description": "Returns an IAM policy for the specified object.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of the bucket in which the object resides.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object"
+ ],
+ "response": {
+ "$ref": "Policy"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "insert": {
+ "id": "storage.objects.insert",
+ "path": "b/{bucket}/o",
+ "httpMethod": "POST",
+ "description": "Stores a new object and metadata.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
+ "required": true,
+ "location": "path"
+ },
+ "contentEncoding": {
+ "type": "string",
+ "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.",
+ "location": "query"
+ },
+ "ifGenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "kmsKeyName": {
+ "type": "string",
+ "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.",
+ "location": "query"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "location": "query"
+ },
+ "predefinedAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of access controls to this object.",
+ "enum": [
+ "authenticatedRead",
+ "bucketOwnerFullControl",
+ "bucketOwnerRead",
+ "private",
+ "projectPrivate",
+ "publicRead"
+ ],
+ "enumDescriptions": [
+ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ "Object owner gets OWNER access, and project team owners get OWNER access.",
+ "Object owner gets OWNER access, and project team owners get READER access.",
+ "Object owner gets OWNER access.",
+ "Object owner gets OWNER access, and project team members get access according to their roles.",
+ "Object owner gets OWNER access, and allUsers get READER access."
+ ],
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit the owner, acl property."
+ ],
+ "location": "query"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "request": {
+ "$ref": "Object"
+ },
+ "response": {
+ "$ref": "Object"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ],
+ "supportsMediaDownload": true,
+ "useMediaDownloadService": true,
+ "supportsMediaUpload": true,
+ "mediaUpload": {
+ "accept": [
+ "*/*"
+ ],
+ "protocols": {
+ "simple": {
+ "multipart": true,
+ "path": "/upload/storage/v1/b/{bucket}/o"
+ },
+ "resumable": {
+ "multipart": true,
+ "path": "/resumable/upload/storage/v1/b/{bucket}/o"
+ }
+ }
+ }
+ },
+ "list": {
+ "id": "storage.objects.list",
+ "path": "b/{bucket}/o",
+ "httpMethod": "GET",
+ "description": "Retrieves a list of objects matching the criteria.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of the bucket in which to look for objects.",
+ "required": true,
+ "location": "path"
+ },
+ "delimiter": {
+ "type": "string",
+ "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.",
+ "default": "1000",
+ "format": "uint32",
+ "minimum": "0",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "A previously-returned page token representing part of the larger set of results to view.",
+ "location": "query"
+ },
+ "prefix": {
+ "type": "string",
+ "description": "Filter results to objects whose names begin with this prefix.",
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to noAcl.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit the owner, acl property."
+ ],
+ "location": "query"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ },
+ "versions": {
+ "type": "boolean",
+ "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "response": {
+ "$ref": "Objects"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ],
+ "supportsSubscription": true
+ },
+ "patch": {
+ "id": "storage.objects.patch",
+ "path": "b/{bucket}/o/{object}",
+ "httpMethod": "PATCH",
+ "description": "Updates an object's metadata. This method supports patch semantics.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of the bucket in which the object resides.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "predefinedAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of access controls to this object.",
+ "enum": [
+ "authenticatedRead",
+ "bucketOwnerFullControl",
+ "bucketOwnerRead",
+ "private",
+ "projectPrivate",
+ "publicRead"
+ ],
+ "enumDescriptions": [
+ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ "Object owner gets OWNER access, and project team owners get OWNER access.",
+ "Object owner gets OWNER access, and project team owners get READER access.",
+ "Object owner gets OWNER access.",
+ "Object owner gets OWNER access, and project team members get access according to their roles.",
+ "Object owner gets OWNER access, and allUsers get READER access."
+ ],
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to full.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit the owner, acl property."
+ ],
+ "location": "query"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object"
+ ],
+ "request": {
+ "$ref": "Object"
+ },
+ "response": {
+ "$ref": "Object"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "rewrite": {
+ "id": "storage.objects.rewrite",
+ "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}",
+ "httpMethod": "POST",
+ "description": "Rewrites a source object to a destination object. Optionally overrides metadata.",
+ "parameters": {
+ "destinationBucket": {
+ "type": "string",
+ "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
+ "required": true,
+ "location": "path"
+ },
+ "destinationKmsKeyName": {
+ "type": "string",
+ "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.",
+ "location": "query"
+ },
+ "destinationObject": {
+ "type": "string",
+ "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "destinationPredefinedAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of access controls to the destination object.",
+ "enum": [
+ "authenticatedRead",
+ "bucketOwnerFullControl",
+ "bucketOwnerRead",
+ "private",
+ "projectPrivate",
+ "publicRead"
+ ],
+ "enumDescriptions": [
+ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ "Object owner gets OWNER access, and project team owners get OWNER access.",
+ "Object owner gets OWNER access, and project team owners get READER access.",
+ "Object owner gets OWNER access.",
+ "Object owner gets OWNER access, and project team members get access according to their roles.",
+ "Object owner gets OWNER access, and allUsers get READER access."
+ ],
+ "location": "query"
+ },
+ "ifGenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifSourceGenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the source object's current generation matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifSourceGenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifSourceMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifSourceMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "maxBytesRewrittenPerCall": {
+ "type": "string",
+ "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.",
+ "format": "int64",
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit the owner, acl property."
+ ],
+ "location": "query"
+ },
+ "rewriteToken": {
+ "type": "string",
+ "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.",
+ "location": "query"
+ },
+ "sourceBucket": {
+ "type": "string",
+ "description": "Name of the bucket in which to find the source object.",
+ "required": true,
+ "location": "path"
+ },
+ "sourceGeneration": {
+ "type": "string",
+ "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "sourceObject": {
+ "type": "string",
+ "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "sourceBucket",
+ "sourceObject",
+ "destinationBucket",
+ "destinationObject"
+ ],
+ "request": {
+ "$ref": "Object"
+ },
+ "response": {
+ "$ref": "RewriteResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "setIamPolicy": {
+ "id": "storage.objects.setIamPolicy",
+ "path": "b/{bucket}/o/{object}/iam",
+ "httpMethod": "PUT",
+ "description": "Updates an IAM policy for the specified object.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of the bucket in which the object resides.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object"
+ ],
+ "request": {
+ "$ref": "Policy"
+ },
+ "response": {
+ "$ref": "Policy"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "testIamPermissions": {
+ "id": "storage.objects.testIamPermissions",
+ "path": "b/{bucket}/o/{object}/iam/testPermissions",
+ "httpMethod": "GET",
+ "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of the bucket in which the object resides.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "permissions": {
+ "type": "string",
+ "description": "Permissions to test.",
+ "required": true,
+ "repeated": true,
+ "location": "query"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object",
+ "permissions"
+ ],
+ "response": {
+ "$ref": "TestIamPermissionsResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "update": {
+ "id": "storage.objects.update",
+ "path": "b/{bucket}/o/{object}",
+ "httpMethod": "PUT",
+ "description": "Updates an object's metadata.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of the bucket in which the object resides.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "predefinedAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of access controls to this object.",
+ "enum": [
+ "authenticatedRead",
+ "bucketOwnerFullControl",
+ "bucketOwnerRead",
+ "private",
+ "projectPrivate",
+ "publicRead"
+ ],
+ "enumDescriptions": [
+ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ "Object owner gets OWNER access, and project team owners get OWNER access.",
+ "Object owner gets OWNER access, and project team owners get READER access.",
+ "Object owner gets OWNER access.",
+ "Object owner gets OWNER access, and project team members get access according to their roles.",
+ "Object owner gets OWNER access, and allUsers get READER access."
+ ],
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to full.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit the owner, acl property."
+ ],
+ "location": "query"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object"
+ ],
+ "request": {
+ "$ref": "Object"
+ },
+ "response": {
+ "$ref": "Object"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ],
+ "supportsMediaDownload": true,
+ "useMediaDownloadService": true
+ },
+ "watchAll": {
+ "id": "storage.objects.watchAll",
+ "path": "b/{bucket}/o/watch",
+ "httpMethod": "POST",
+ "description": "Watch for changes on all objects in a bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of the bucket in which to look for objects.",
+ "required": true,
+ "location": "path"
+ },
+ "delimiter": {
+ "type": "string",
+ "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.",
+ "default": "1000",
+ "format": "uint32",
+ "minimum": "0",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "A previously-returned page token representing part of the larger set of results to view.",
+ "location": "query"
+ },
+ "prefix": {
+ "type": "string",
+ "description": "Filter results to objects whose names begin with this prefix.",
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to noAcl.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit the owner, acl property."
+ ],
+ "location": "query"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ },
+ "versions": {
+ "type": "boolean",
+ "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "request": {
+ "$ref": "Channel",
+ "parameterName": "resource"
+ },
+ "response": {
+ "$ref": "Channel"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ],
+ "supportsSubscription": true
+ }
+ }
+ },
+ "projects": {
+ "resources": {
+ "serviceAccount": {
+ "methods": {
+ "get": {
+ "id": "storage.projects.serviceAccount.get",
+ "path": "projects/{projectId}/serviceAccount",
+ "httpMethod": "GET",
+ "description": "Get the email address of this project's Google Cloud Storage service account.",
+ "parameters": {
+ "projectId": {
+ "type": "string",
+ "description": "Project ID",
+ "required": true,
+ "location": "path"
+ },
+ "userProject": {
+ "type": "string",
+ "description": "The project to be billed for this request, for Requester Pays buckets.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "projectId"
+ ],
+ "response": {
+ "$ref": "ServiceAccount"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+}
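
The discovery document above is the source the generated Go client is built from; the storage.objects.rewrite entry in particular describes a loop protocol in which the caller echoes rewriteToken from each response back into the next request until the response reports done. The sketch below shows how that loop is typically driven through the generated google.golang.org/api/storage/v1 surface; it is a minimal illustration, the bucket and object names are placeholders, and Application Default Credentials with the devstorage.read_write scope listed for the method are assumed.

// Hypothetical sketch of the storage.objects.rewrite loop described in the
// discovery document above. Bucket and object names are placeholders.
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()

	// Application Default Credentials, scoped to devstorage.read_write as
	// listed in the method's "scopes" entry.
	client, err := google.DefaultClient(ctx, storage.DevstorageReadWriteScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := storage.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Repeat the rewrite call, echoing back rewriteToken, until done is true.
	token := ""
	for {
		call := svc.Objects.Rewrite("src-bucket", "src-object", "dst-bucket", "dst-object", &storage.Object{})
		if token != "" {
			call = call.RewriteToken(token)
		}
		resp, err := call.Context(ctx).Do()
		if err != nil {
			log.Fatal(err)
		}
		if resp.Done {
			log.Printf("rewrote %d bytes", resp.TotalBytesRewritten)
			break
		}
		token = resp.RewriteToken
	}
}

The loop exists because, as the maxBytesRewrittenPerCall description notes, a rewrite that spans locations or storage classes may not finish in a single call, so the service hands back a token and partial progress instead of blocking.
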
diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go
index 279344d8e..ae840c4d8 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-gen.go
+++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go
@@ -1,7 +1,5 @@
// Package storage provides access to the Cloud Storage JSON API.
//
-// This package is DEPRECATED. Use package cloud.google.com/go/storage instead.
-//
// See https://developers.google.com/storage/docs/json_api/
//
// Usage example:
@@ -206,34 +204,19 @@ type Bucket struct {
// configuration.
Cors []*BucketCors `json:"cors,omitempty"`
- // DefaultEventBasedHold: The default value for event-based hold on
- // newly created objects in this bucket. Event-based hold is a way to
- // retain objects indefinitely until an event occurs, signified by the
- // hold's release. After being released, such objects will be subject to
- // bucket-level retention (if any). One sample use case of this flag is
- // for banks to hold loan documents for at least 3 years after loan is
- // paid in full. Here, bucket-level retention is 3 years and the event
- // is loan being paid in full. In this example, these objects will be
- // held intact for any number of years until the event has occurred
- // (event-based hold on the object is released) and then 3 more years
- // after that. That means retention duration of the objects begins from
- // the moment event-based hold transitioned from true to false. Objects
- // under event-based hold cannot be deleted, overwritten or archived
- // until the hold is removed.
- DefaultEventBasedHold bool `json:"defaultEventBasedHold,omitempty"`
-
// DefaultObjectAcl: Default access controls to apply to new objects
// when no ACL is provided.
DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"`
- // Encryption: Encryption configuration for a bucket.
+ // Encryption: Encryption configuration used by default for newly
+ // inserted objects, when no encryption config is specified.
Encryption *BucketEncryption `json:"encryption,omitempty"`
// Etag: HTTP 1.1 Entity tag for the bucket.
Etag string `json:"etag,omitempty"`
- // Id: The ID of the bucket. For buckets, the id and name properties are
- // the same.
+ // Id: The ID of the bucket. For buckets, the id and name properities
+ // are the same.
Id string `json:"id,omitempty"`
// Kind: The kind of item this is. For buckets, this is always
@@ -271,18 +254,6 @@ type Bucket struct {
// to.
ProjectNumber uint64 `json:"projectNumber,omitempty,string"`
- // RetentionPolicy: The bucket's retention policy. The retention policy
- // enforces a minimum retention time for all objects contained in the
- // bucket, based on their creation time. Any attempt to overwrite or
- // delete objects younger than the retention period will result in a
- // PERMISSION_DENIED error. An unlocked retention policy can be modified
- // or removed from the bucket via a storage.buckets.update operation. A
- // locked retention policy cannot be removed or shortened in duration
- // for the lifetime of the bucket. Attempting to remove or decrease
- // period of a locked retention policy will result in a
- // PERMISSION_DENIED error.
- RetentionPolicy *BucketRetentionPolicy `json:"retentionPolicy,omitempty"`
-
// SelfLink: The URI of this bucket.
SelfLink string `json:"selfLink,omitempty"`
@@ -331,15 +302,14 @@ type Bucket struct {
}
func (s *Bucket) MarshalJSON() ([]byte, error) {
- type NoMethod Bucket
- raw := NoMethod(*s)
+ type noMethod Bucket
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// BucketBilling: The bucket's billing configuration.
type BucketBilling struct {
- // RequesterPays: When set to true, Requester Pays is enabled for this
- // bucket.
+ // RequesterPays: When set to true, bucket is requester pays.
RequesterPays bool `json:"requesterPays,omitempty"`
// ForceSendFields is a list of field names (e.g. "RequesterPays") to
@@ -360,8 +330,8 @@ type BucketBilling struct {
}
func (s *BucketBilling) MarshalJSON() ([]byte, error) {
- type NoMethod BucketBilling
- raw := NoMethod(*s)
+ type noMethod BucketBilling
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
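The NoMethod-to-noMethod change repeated in the hunks above and below is only a rename between generator versions; the underlying idiom, a local type conversion that strips the MarshalJSON method so the nested marshal call cannot recurse, is unchanged. A minimal standalone sketch of that idiom, using a hypothetical Widget type rather than any type from this package:

package main

import (
	"encoding/json"
	"fmt"
)

type Widget struct {
	Name  string `json:"name,omitempty"`
	Count int    `json:"count,omitempty"`
}

func (w *Widget) MarshalJSON() ([]byte, error) {
	// noMethod has Widget's fields but none of its methods, so the call
	// below uses the default encoder instead of recursing into MarshalJSON.
	type noMethod Widget
	return json.Marshal(noMethod(*w))
}

func main() {
	b, _ := json.Marshal(&Widget{Name: "example", Count: 2})
	fmt.Println(string(b)) // {"name":"example","count":2}
}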
@@ -403,16 +373,14 @@ type BucketCors struct {
}
func (s *BucketCors) MarshalJSON() ([]byte, error) {
- type NoMethod BucketCors
- raw := NoMethod(*s)
+ type noMethod BucketCors
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
-// BucketEncryption: Encryption configuration for a bucket.
+// BucketEncryption: Encryption configuration used by default for newly
+// inserted objects, when no encryption config is specified.
type BucketEncryption struct {
- // DefaultKmsKeyName: A Cloud KMS key that will be used to encrypt
- // objects inserted into this bucket, if no encryption method is
- // specified.
DefaultKmsKeyName string `json:"defaultKmsKeyName,omitempty"`
// ForceSendFields is a list of field names (e.g. "DefaultKmsKeyName")
@@ -434,8 +402,8 @@ type BucketEncryption struct {
}
func (s *BucketEncryption) MarshalJSON() ([]byte, error) {
- type NoMethod BucketEncryption
- raw := NoMethod(*s)
+ type noMethod BucketEncryption
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -464,8 +432,8 @@ type BucketLifecycle struct {
}
func (s *BucketLifecycle) MarshalJSON() ([]byte, error) {
- type NoMethod BucketLifecycle
- raw := NoMethod(*s)
+ type noMethod BucketLifecycle
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -494,8 +462,8 @@ type BucketLifecycleRule struct {
}
func (s *BucketLifecycleRule) MarshalJSON() ([]byte, error) {
- type NoMethod BucketLifecycleRule
- raw := NoMethod(*s)
+ type noMethod BucketLifecycleRule
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -527,8 +495,8 @@ type BucketLifecycleRuleAction struct {
}
func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) {
- type NoMethod BucketLifecycleRuleAction
- raw := NoMethod(*s)
+ type noMethod BucketLifecycleRuleAction
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -549,14 +517,6 @@ type BucketLifecycleRuleCondition struct {
// matches archived objects.
IsLive *bool `json:"isLive,omitempty"`
- // MatchesPattern: A regular expression that satisfies the RE2 syntax.
- // This condition is satisfied when the name of the object matches the
- // RE2 pattern. Note: This feature is currently in the "Early Access"
- // launch stage and is only available to a whitelisted set of users;
- // that means that this feature may be changed in backward-incompatible
- // ways and that it is not guaranteed to be released.
- MatchesPattern string `json:"matchesPattern,omitempty"`
-
// MatchesStorageClass: Objects having any of the storage classes
// specified by this condition will be matched. Values include
// MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, and
@@ -586,8 +546,8 @@ type BucketLifecycleRuleCondition struct {
}
func (s *BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) {
- type NoMethod BucketLifecycleRuleCondition
- raw := NoMethod(*s)
+ type noMethod BucketLifecycleRuleCondition
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -620,8 +580,8 @@ type BucketLogging struct {
}
func (s *BucketLogging) MarshalJSON() ([]byte, error) {
- type NoMethod BucketLogging
- raw := NoMethod(*s)
+ type noMethod BucketLogging
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -652,57 +612,8 @@ type BucketOwner struct {
}
func (s *BucketOwner) MarshalJSON() ([]byte, error) {
- type NoMethod BucketOwner
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// BucketRetentionPolicy: The bucket's retention policy. The retention
-// policy enforces a minimum retention time for all objects contained in
-// the bucket, based on their creation time. Any attempt to overwrite or
-// delete objects younger than the retention period will result in a
-// PERMISSION_DENIED error. An unlocked retention policy can be modified
-// or removed from the bucket via a storage.buckets.update operation. A
-// locked retention policy cannot be removed or shortened in duration
-// for the lifetime of the bucket. Attempting to remove or decrease
-// period of a locked retention policy will result in a
-// PERMISSION_DENIED error.
-type BucketRetentionPolicy struct {
- // EffectiveTime: Server-determined value that indicates the time from
- // which policy was enforced and effective. This value is in RFC 3339
- // format.
- EffectiveTime string `json:"effectiveTime,omitempty"`
-
- // IsLocked: Once locked, an object retention policy cannot be modified.
- IsLocked bool `json:"isLocked,omitempty"`
-
- // RetentionPeriod: The duration in seconds that objects need to be
- // retained. Retention duration must be greater than zero and less than
- // 100 years. Note that enforcement of retention periods less than a day
- // is not guaranteed. Such periods should only be used for testing
- // purposes.
- RetentionPeriod int64 `json:"retentionPeriod,omitempty,string"`
-
- // ForceSendFields is a list of field names (e.g. "EffectiveTime") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "EffectiveTime") to include
- // in API requests with the JSON null value. By default, fields with
- // empty values are omitted from API requests. However, any field with
- // an empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *BucketRetentionPolicy) MarshalJSON() ([]byte, error) {
- type NoMethod BucketRetentionPolicy
- raw := NoMethod(*s)
+ type noMethod BucketOwner
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -730,8 +641,8 @@ type BucketVersioning struct {
}
func (s *BucketVersioning) MarshalJSON() ([]byte, error) {
- type NoMethod BucketVersioning
- raw := NoMethod(*s)
+ type noMethod BucketVersioning
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -770,8 +681,8 @@ type BucketWebsite struct {
}
func (s *BucketWebsite) MarshalJSON() ([]byte, error) {
- type NoMethod BucketWebsite
- raw := NoMethod(*s)
+ type noMethod BucketWebsite
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -847,8 +758,8 @@ type BucketAccessControl struct {
}
func (s *BucketAccessControl) MarshalJSON() ([]byte, error) {
- type NoMethod BucketAccessControl
- raw := NoMethod(*s)
+ type noMethod BucketAccessControl
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -879,8 +790,8 @@ type BucketAccessControlProjectTeam struct {
}
func (s *BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) {
- type NoMethod BucketAccessControlProjectTeam
- raw := NoMethod(*s)
+ type noMethod BucketAccessControlProjectTeam
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -915,8 +826,8 @@ type BucketAccessControls struct {
}
func (s *BucketAccessControls) MarshalJSON() ([]byte, error) {
- type NoMethod BucketAccessControls
- raw := NoMethod(*s)
+ type noMethod BucketAccessControls
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -956,8 +867,8 @@ type Buckets struct {
}
func (s *Buckets) MarshalJSON() ([]byte, error) {
- type NoMethod Buckets
- raw := NoMethod(*s)
+ type noMethod Buckets
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -1022,8 +933,8 @@ type Channel struct {
}
func (s *Channel) MarshalJSON() ([]byte, error) {
- type NoMethod Channel
- raw := NoMethod(*s)
+ type noMethod Channel
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -1057,8 +968,8 @@ type ComposeRequest struct {
}
func (s *ComposeRequest) MarshalJSON() ([]byte, error) {
- type NoMethod ComposeRequest
- raw := NoMethod(*s)
+ type noMethod ComposeRequest
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -1066,8 +977,8 @@ type ComposeRequestSourceObjects struct {
// Generation: The generation of this object to use as the source.
Generation int64 `json:"generation,omitempty,string"`
- // Name: The source object's name. All source objects must reside in the
- // same bucket.
+ // Name: The source object's name. The source object's bucket is
+ // implicitly the destination bucket.
Name string `json:"name,omitempty"`
// ObjectPreconditions: Conditions that must be met for this operation
@@ -1092,8 +1003,8 @@ type ComposeRequestSourceObjects struct {
}
func (s *ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) {
- type NoMethod ComposeRequestSourceObjects
- raw := NoMethod(*s)
+ type noMethod ComposeRequestSourceObjects
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -1125,8 +1036,8 @@ type ComposeRequestSourceObjectsObjectPreconditions struct {
}
func (s *ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) {
- type NoMethod ComposeRequestSourceObjectsObjectPreconditions
- raw := NoMethod(*s)
+ type noMethod ComposeRequestSourceObjectsObjectPreconditions
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -1190,8 +1101,8 @@ type Notification struct {
}
func (s *Notification) MarshalJSON() ([]byte, error) {
- type NoMethod Notification
- raw := NoMethod(*s)
+ type noMethod Notification
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -1226,8 +1137,8 @@ type Notifications struct {
}
func (s *Notifications) MarshalJSON() ([]byte, error) {
- type NoMethod Notifications
- raw := NoMethod(*s)
+ type noMethod Notifications
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -1274,21 +1185,6 @@ type Object struct {
// Etag: HTTP 1.1 Entity tag for the object.
Etag string `json:"etag,omitempty"`
- // EventBasedHold: Whether an object is under event-based hold.
- // Event-based hold is a way to retain objects until an event occurs,
- // which is signified by the hold's release (i.e. this value is set to
- // false). After being released (set to false), such objects will be
- // subject to bucket-level retention (if any). One sample use case of
- // this flag is for banks to hold loan documents for at least 3 years
- // after loan is paid in full. Here, bucket-level retention is 3 years
- // and the event is the loan being paid in full. In this example, these
- // objects will be held intact for any number of years until the event
- // has occurred (event-based hold on the object is released) and then 3
- // more years after that. That means retention duration of the objects
- // begins from the moment event-based hold transitioned from true to
- // false.
- EventBasedHold bool `json:"eventBasedHold,omitempty"`
-
// Generation: The content generation of this object. Used for object
// versioning.
Generation int64 `json:"generation,omitempty,string"`
@@ -1330,15 +1226,6 @@ type Object struct {
// the object.
Owner *ObjectOwner `json:"owner,omitempty"`
- // RetentionExpirationTime: A server-determined value that specifies the
- // earliest time that the object's retention period expires. This value
- // is in RFC 3339 format. Note 1: This field is not provided for objects
- // with an active event-based hold, since retention expiration is
- // unknown until the hold is removed. Note 2: This value can be provided
- // even when temporary hold is set (so that the user can reason about
- // policy without having to first unset the temporary hold).
- RetentionExpirationTime string `json:"retentionExpirationTime,omitempty"`
-
// SelfLink: The link to this object.
SelfLink string `json:"selfLink,omitempty"`
@@ -1348,15 +1235,6 @@ type Object struct {
// StorageClass: Storage class of the object.
StorageClass string `json:"storageClass,omitempty"`
- // TemporaryHold: Whether an object is under temporary hold. While this
- // flag is set to true, the object is protected against deletion and
- // overwrites. A common use case of this flag is regulatory
- // investigations where objects need to be retained while the
- // investigation is ongoing. Note that unlike event-based hold,
- // temporary hold does not impact retention expiration time of an
- // object.
- TemporaryHold bool `json:"temporaryHold,omitempty"`
-
// TimeCreated: The creation time of the object in RFC 3339 format.
TimeCreated string `json:"timeCreated,omitempty"`
@@ -1396,8 +1274,8 @@ type Object struct {
}
func (s *Object) MarshalJSON() ([]byte, error) {
- type NoMethod Object
- raw := NoMethod(*s)
+ type noMethod Object
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -1429,8 +1307,8 @@ type ObjectCustomerEncryption struct {
}
func (s *ObjectCustomerEncryption) MarshalJSON() ([]byte, error) {
- type NoMethod ObjectCustomerEncryption
- raw := NoMethod(*s)
+ type noMethod ObjectCustomerEncryption
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -1461,8 +1339,8 @@ type ObjectOwner struct {
}
func (s *ObjectOwner) MarshalJSON() ([]byte, error) {
- type NoMethod ObjectOwner
- raw := NoMethod(*s)
+ type noMethod ObjectOwner
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -1545,8 +1423,8 @@ type ObjectAccessControl struct {
}
func (s *ObjectAccessControl) MarshalJSON() ([]byte, error) {
- type NoMethod ObjectAccessControl
- raw := NoMethod(*s)
+ type noMethod ObjectAccessControl
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -1577,8 +1455,8 @@ type ObjectAccessControlProjectTeam struct {
}
func (s *ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) {
- type NoMethod ObjectAccessControlProjectTeam
- raw := NoMethod(*s)
+ type noMethod ObjectAccessControlProjectTeam
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -1613,8 +1491,8 @@ type ObjectAccessControls struct {
}
func (s *ObjectAccessControls) MarshalJSON() ([]byte, error) {
- type NoMethod ObjectAccessControls
- raw := NoMethod(*s)
+ type noMethod ObjectAccessControls
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -1658,8 +1536,8 @@ type Objects struct {
}
func (s *Objects) MarshalJSON() ([]byte, error) {
- type NoMethod Objects
- raw := NoMethod(*s)
+ type noMethod Objects
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -1707,8 +1585,8 @@ type Policy struct {
}
func (s *Policy) MarshalJSON() ([]byte, error) {
- type NoMethod Policy
- raw := NoMethod(*s)
+ type noMethod Policy
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -1790,8 +1668,8 @@ type PolicyBindings struct {
}
func (s *PolicyBindings) MarshalJSON() ([]byte, error) {
- type NoMethod PolicyBindings
- raw := NoMethod(*s)
+ type noMethod PolicyBindings
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -1845,8 +1723,8 @@ type RewriteResponse struct {
}
func (s *RewriteResponse) MarshalJSON() ([]byte, error) {
- type NoMethod RewriteResponse
- raw := NoMethod(*s)
+ type noMethod RewriteResponse
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -1882,8 +1760,8 @@ type ServiceAccount struct {
}
func (s *ServiceAccount) MarshalJSON() ([]byte, error) {
- type NoMethod ServiceAccount
- raw := NoMethod(*s)
+ type noMethod ServiceAccount
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -1935,8 +1813,8 @@ type TestIamPermissionsResponse struct {
}
func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) {
- type NoMethod TestIamPermissionsResponse
- raw := NoMethod(*s)
+ type noMethod TestIamPermissionsResponse
+ raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
@@ -1961,7 +1839,7 @@ func (r *BucketAccessControlsService) Delete(bucket string, entity string) *Buck
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *BucketAccessControlsDeleteCall) UserProject(userProject string) *BucketAccessControlsDeleteCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -2000,7 +1878,6 @@ func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response,
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("DELETE", urls, body)
@@ -2046,7 +1923,7 @@ func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
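The userProject descriptions rewritten throughout these hunks correspond to the UserProject builder method on each generated call. A minimal sketch of supplying it (reusing a *storage.Service built as in the earlier example; the bucket, ACL entity, and billing project below are placeholders):

package example

import (
	storage "google.golang.org/api/storage/v1"
)

// getACLBilledTo reads a bucket ACL entry while billing the request to the
// given project, as Requester Pays buckets require.
func getACLBilledTo(svc *storage.Service, billingProject string) (*storage.BucketAccessControl, error) {
	return svc.BucketAccessControls.Get("example-bucket", "allUsers").
		UserProject(billingProject).
		Do()
}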
@@ -2082,7 +1959,7 @@ func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketA
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *BucketAccessControlsGetCall) UserProject(userProject string) *BucketAccessControlsGetCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -2134,7 +2011,6 @@ func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, err
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
@@ -2179,7 +2055,7 @@ func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketA
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -2205,7 +2081,7 @@ func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketA
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -2242,7 +2118,7 @@ func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *BucketAccessControlsInsertCall) UserProject(userProject string) *BucketAccessControlsInsertCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -2286,7 +2162,6 @@ func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response,
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
@@ -2330,7 +2205,7 @@ func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Buck
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -2349,7 +2224,7 @@ func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Buck
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -2388,7 +2263,7 @@ func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsL
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *BucketAccessControlsListCall) UserProject(userProject string) *BucketAccessControlsListCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -2440,7 +2315,6 @@ func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, er
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
@@ -2484,7 +2358,7 @@ func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Bucket
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -2503,7 +2377,7 @@ func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Bucket
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -2532,7 +2406,8 @@ type BucketAccessControlsPatchCall struct {
header_ http.Header
}
-// Patch: Patches an ACL entry on the specified bucket.
+// Patch: Updates an ACL entry on the specified bucket. This method
+// supports patch semantics.
func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall {
c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
@@ -2542,7 +2417,7 @@ func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucket
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *BucketAccessControlsPatchCall) UserProject(userProject string) *BucketAccessControlsPatchCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -2586,7 +2461,6 @@ func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, e
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PATCH", urls, body)
@@ -2631,12 +2505,12 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
- // "description": "Patches an ACL entry on the specified bucket.",
+ // "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.",
// "httpMethod": "PATCH",
// "id": "storage.bucketAccessControls.patch",
// "parameterOrder": [
@@ -2657,7 +2531,7 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -2699,7 +2573,7 @@ func (r *BucketAccessControlsService) Update(bucket string, entity string, bucke
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *BucketAccessControlsUpdateCall) UserProject(userProject string) *BucketAccessControlsUpdateCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -2743,7 +2617,6 @@ func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response,
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PUT", urls, body)
@@ -2788,7 +2661,7 @@ func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Buck
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -2814,7 +2687,7 @@ func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Buck
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -2868,7 +2741,7 @@ func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *BucketsDeleteCall) UserProject(userProject string) *BucketsDeleteCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -2907,7 +2780,6 @@ func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("DELETE", urls, body)
@@ -2957,7 +2829,7 @@ func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error {
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -3020,7 +2892,7 @@ func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall {
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -3072,7 +2944,6 @@ func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) {
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
@@ -3116,7 +2987,7 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -3160,7 +3031,7 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -3199,7 +3070,7 @@ func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall {
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *BucketsGetIamPolicyCall) UserProject(userProject string) *BucketsGetIamPolicyCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -3251,7 +3122,6 @@ func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
@@ -3295,7 +3165,7 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -3314,7 +3184,7 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -3405,7 +3275,7 @@ func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall {
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request.
+// be billed for this request, for Requester Pays buckets.
func (c *BucketsInsertCall) UserProject(userProject string) *BucketsInsertCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -3449,7 +3319,6 @@ func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) {
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
@@ -3490,7 +3359,7 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -3562,7 +3431,7 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -3635,7 +3504,7 @@ func (c *BucketsListCall) Projection(projection string) *BucketsListCall {
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request.
+// be billed for this request, for Requester Pays buckets.
func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -3687,7 +3556,6 @@ func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) {
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
@@ -3728,7 +3596,7 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) {
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -3778,7 +3646,7 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) {
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -3819,153 +3687,6 @@ func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) err
}
}
-// method id "storage.buckets.lockRetentionPolicy":
-
-type BucketsLockRetentionPolicyCall struct {
- s *Service
- bucket string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// LockRetentionPolicy: Locks retention policy on a bucket.
-func (r *BucketsService) LockRetentionPolicy(bucket string, ifMetagenerationMatch int64) *BucketsLockRetentionPolicyCall {
- c := &BucketsLockRetentionPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *BucketsLockRetentionPolicyCall) UserProject(userProject string) *BucketsLockRetentionPolicyCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketsLockRetentionPolicyCall) Fields(s ...googleapi.Field) *BucketsLockRetentionPolicyCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketsLockRetentionPolicyCall) Context(ctx context.Context) *BucketsLockRetentionPolicyCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *BucketsLockRetentionPolicyCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/lockRetentionPolicy")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.buckets.lockRetentionPolicy" call.
-// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Bucket.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Bucket{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Locks retention policy on a bucket.",
- // "httpMethod": "POST",
- // "id": "storage.buckets.lockRetentionPolicy",
- // "parameterOrder": [
- // "bucket",
- // "ifMetagenerationMatch"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether bucket's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/lockRetentionPolicy",
- // "response": {
- // "$ref": "Bucket"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
// method id "storage.buckets.patch":
type BucketsPatchCall struct {
@@ -4056,7 +3777,7 @@ func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall {
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *BucketsPatchCall) UserProject(userProject string) *BucketsPatchCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -4100,7 +3821,6 @@ func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) {
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PATCH", urls, body)
@@ -4144,7 +3864,7 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -4228,7 +3948,7 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -4268,7 +3988,7 @@ func (r *BucketsService) SetIamPolicy(bucket string, policy *Policy) *BucketsSet
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *BucketsSetIamPolicyCall) UserProject(userProject string) *BucketsSetIamPolicyCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -4312,7 +4032,6 @@ func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error)
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PUT", urls, body)
@@ -4356,7 +4075,7 @@ func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -4375,7 +4094,7 @@ func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -4417,7 +4136,7 @@ func (r *BucketsService) TestIamPermissions(bucket string, permissions []string)
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *BucketsTestIamPermissionsCall) UserProject(userProject string) *BucketsTestIamPermissionsCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -4469,7 +4188,6 @@ func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam/testPermissions")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
@@ -4513,7 +4231,7 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -4540,7 +4258,7 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -4650,7 +4368,7 @@ func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall {
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *BucketsUpdateCall) UserProject(userProject string) *BucketsUpdateCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -4694,7 +4412,6 @@ func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) {
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PUT", urls, body)
@@ -4738,7 +4455,7 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -4822,7 +4539,7 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -4897,7 +4614,6 @@ func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) {
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
@@ -4958,7 +4674,7 @@ func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *DefaultObjectAccessControlsDeleteCall) UserProject(userProject string) *DefaultObjectAccessControlsDeleteCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -4997,7 +4713,6 @@ func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Res
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("DELETE", urls, body)
@@ -5043,7 +4758,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption)
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -5079,7 +4794,7 @@ func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *DefaultObjectAccessControlsGetCall) UserProject(userProject string) *DefaultObjectAccessControlsGetCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -5131,7 +4846,6 @@ func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Respon
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
@@ -5176,7 +4890,7 @@ func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -5202,7 +4916,7 @@ func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -5240,7 +4954,7 @@ func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccessc
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *DefaultObjectAccessControlsInsertCall) UserProject(userProject string) *DefaultObjectAccessControlsInsertCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -5284,7 +4998,6 @@ func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Res
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
@@ -5328,7 +5041,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption)
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -5347,7 +5060,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption)
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -5403,7 +5116,7 @@ func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagen
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *DefaultObjectAccessControlsListCall) UserProject(userProject string) *DefaultObjectAccessControlsListCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -5455,7 +5168,6 @@ func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Respo
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
@@ -5499,7 +5211,7 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -5530,7 +5242,7 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -5559,7 +5271,8 @@ type DefaultObjectAccessControlsPatchCall struct {
header_ http.Header
}
-// Patch: Patches a default object ACL entry on the specified bucket.
+// Patch: Updates a default object ACL entry on the specified bucket.
+// This method supports patch semantics.
func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall {
c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
@@ -5569,7 +5282,7 @@ func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string,
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *DefaultObjectAccessControlsPatchCall) UserProject(userProject string) *DefaultObjectAccessControlsPatchCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -5613,7 +5326,6 @@ func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Resp
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PATCH", urls, body)
@@ -5658,12 +5370,12 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption)
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
- // "description": "Patches a default object ACL entry on the specified bucket.",
+ // "description": "Updates a default object ACL entry on the specified bucket. This method supports patch semantics.",
// "httpMethod": "PATCH",
// "id": "storage.defaultObjectAccessControls.patch",
// "parameterOrder": [
@@ -5684,7 +5396,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption)
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -5726,7 +5438,7 @@ func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *DefaultObjectAccessControlsUpdateCall) UserProject(userProject string) *DefaultObjectAccessControlsUpdateCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -5770,7 +5482,6 @@ func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Res
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PUT", urls, body)
@@ -5815,7 +5526,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption)
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -5841,7 +5552,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption)
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -5881,7 +5592,7 @@ func (r *NotificationsService) Delete(bucket string, notification string) *Notif
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *NotificationsDeleteCall) UserProject(userProject string) *NotificationsDeleteCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -5920,7 +5631,6 @@ func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error)
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("DELETE", urls, body)
@@ -5966,7 +5676,7 @@ func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error {
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -6002,7 +5712,7 @@ func (r *NotificationsService) Get(bucket string, notification string) *Notifica
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *NotificationsGetCall) UserProject(userProject string) *NotificationsGetCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -6054,7 +5764,6 @@ func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) {
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
@@ -6099,7 +5808,7 @@ func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification,
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -6125,7 +5834,7 @@ func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification,
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -6165,7 +5874,7 @@ func (r *NotificationsService) Insert(bucket string, notification *Notification)
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *NotificationsInsertCall) UserProject(userProject string) *NotificationsInsertCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -6209,7 +5918,6 @@ func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error)
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
@@ -6253,7 +5961,7 @@ func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notificatio
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -6272,7 +5980,7 @@ func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notificatio
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -6313,7 +6021,7 @@ func (r *NotificationsService) List(bucket string) *NotificationsListCall {
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *NotificationsListCall) UserProject(userProject string) *NotificationsListCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -6365,7 +6073,6 @@ func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) {
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
@@ -6409,7 +6116,7 @@ func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -6428,7 +6135,7 @@ func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -6479,7 +6186,7 @@ func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAcc
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *ObjectAccessControlsDeleteCall) UserProject(userProject string) *ObjectAccessControlsDeleteCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -6518,7 +6225,6 @@ func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response,
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("DELETE", urls, body)
@@ -6578,7 +6284,7 @@ func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -6624,7 +6330,7 @@ func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccess
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *ObjectAccessControlsGetCall) UserProject(userProject string) *ObjectAccessControlsGetCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -6676,7 +6382,6 @@ func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, err
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
@@ -6722,7 +6427,7 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -6761,7 +6466,7 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -6808,7 +6513,7 @@ func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAcc
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *ObjectAccessControlsInsertCall) UserProject(userProject string) *ObjectAccessControlsInsertCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -6852,7 +6557,6 @@ func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response,
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
@@ -6897,7 +6601,7 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -6929,7 +6633,7 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -6978,7 +6682,7 @@ func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAcces
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *ObjectAccessControlsListCall) UserProject(userProject string) *ObjectAccessControlsListCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -7030,7 +6734,6 @@ func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, er
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
@@ -7075,7 +6778,7 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -7107,7 +6810,7 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -7137,7 +6840,8 @@ type ObjectAccessControlsPatchCall struct {
header_ http.Header
}
-// Patch: Patches an ACL entry on the specified object.
+// Patch: Updates an ACL entry on the specified object. This method
+// supports patch semantics.
func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall {
c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
@@ -7156,7 +6860,7 @@ func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAcce
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *ObjectAccessControlsPatchCall) UserProject(userProject string) *ObjectAccessControlsPatchCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -7200,7 +6904,6 @@ func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, e
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PATCH", urls, body)
@@ -7246,12 +6949,12 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
- // "description": "Patches an ACL entry on the specified object.",
+ // "description": "Updates an ACL entry on the specified object. This method supports patch semantics.",
// "httpMethod": "PATCH",
// "id": "storage.objectAccessControls.patch",
// "parameterOrder": [
@@ -7285,7 +6988,7 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -7337,7 +7040,7 @@ func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAcc
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *ObjectAccessControlsUpdateCall) UserProject(userProject string) *ObjectAccessControlsUpdateCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -7381,7 +7084,6 @@ func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response,
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PUT", urls, body)
@@ -7427,7 +7129,7 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -7466,7 +7168,7 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -7557,7 +7259,7 @@ func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall {
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *ObjectsComposeCall) UserProject(userProject string) *ObjectsComposeCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -7571,9 +7273,9 @@ func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall {
return c
}
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
+// Context sets the context to be used in this call's Do and Download
+// methods. Any pending HTTP request will be aborted if the provided
+// context is canceled.
func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall {
c.ctx_ = ctx
return c
@@ -7601,7 +7303,6 @@ func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) {
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
@@ -7613,6 +7314,22 @@ func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) {
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
+// Download fetches the API endpoint's "media" value, instead of the normal
+// API response value. If the returned error is nil, the Response is guaranteed to
+// have a 2xx status code. Callers must close the Response.Body as usual.
+func (c *ObjectsComposeCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("media")
+ if err != nil {
+ return nil, err
+ }
+ if err := googleapi.CheckMediaResponse(res); err != nil {
+ res.Body.Close()
+ return nil, err
+ }
+ return res, nil
+}
+
// Do executes the "storage.objects.compose" call.
// Exactly one of *Object or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
@@ -7646,7 +7363,7 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) {
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -7660,7 +7377,7 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) {
// ],
// "parameters": {
// "destinationBucket": {
- // "description": "Name of the bucket containing the source objects. The destination object is stored in this bucket.",
+ // "description": "Name of the bucket in which to store the new object.",
// "location": "path",
// "required": true,
// "type": "string"
@@ -7710,7 +7427,7 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) {
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -7726,7 +7443,9 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) {
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/devstorage.full_control",
// "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
+ // ],
+ // "supportsMediaDownload": true,
+ // "useMediaDownloadService": true
// }
}
@@ -7872,7 +7591,7 @@ func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyC
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *ObjectsCopyCall) UserProject(userProject string) *ObjectsCopyCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -7886,9 +7605,9 @@ func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall {
return c
}
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
+// Context sets the context to be used in this call's Do and Download
+// methods. Any pending HTTP request will be aborted if the provided
+// context is canceled.
func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall {
c.ctx_ = ctx
return c
@@ -7916,7 +7635,6 @@ func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) {
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
@@ -7930,6 +7648,22 @@ func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) {
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
+// Download fetches the API endpoint's "media" value, instead of the normal
+// API response value. If the returned error is nil, the Response is guaranteed to
+// have a 2xx status code. Callers must close the Response.Body as usual.
+func (c *ObjectsCopyCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("media")
+ if err != nil {
+ return nil, err
+ }
+ if err := googleapi.CheckMediaResponse(res); err != nil {
+ res.Body.Close()
+ return nil, err
+ }
+ return res, nil
+}
+
// Do executes the "storage.objects.copy" call.
// Exactly one of *Object or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
@@ -7963,7 +7697,7 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) {
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -8091,7 +7825,7 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) {
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -8107,7 +7841,9 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) {
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/devstorage.full_control",
// "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
+ // ],
+ // "supportsMediaDownload": true,
+ // "useMediaDownloadService": true
// }
}
@@ -8178,7 +7914,7 @@ func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *ObjectsDeleteCall) UserProject(userProject string) *ObjectsDeleteCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -8217,7 +7953,6 @@ func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("DELETE", urls, body)
@@ -8293,7 +8028,7 @@ func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error {
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -8384,7 +8119,7 @@ func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall {
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *ObjectsGetCall) UserProject(userProject string) *ObjectsGetCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -8436,7 +8171,6 @@ func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) {
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
@@ -8497,7 +8231,7 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) {
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -8566,7 +8300,7 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) {
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -8617,7 +8351,7 @@ func (c *ObjectsGetIamPolicyCall) Generation(generation int64) *ObjectsGetIamPol
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *ObjectsGetIamPolicyCall) UserProject(userProject string) *ObjectsGetIamPolicyCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -8669,7 +8403,6 @@ func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
@@ -8714,7 +8447,7 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -8746,7 +8479,7 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -8885,7 +8618,7 @@ func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall {
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *ObjectsInsertCall) UserProject(userProject string) *ObjectsInsertCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -8971,7 +8704,6 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) {
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o")
if c.mediaInfo_ != nil {
urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1)
@@ -8981,12 +8713,11 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) {
body = new(bytes.Buffer)
reqHeaders.Set("Content-Type", "application/json")
}
- body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body)
+ body, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body)
defer cleanup()
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
req.Header = reqHeaders
- gensupport.SetGetBody(req, getBody)
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
})
@@ -9043,7 +8774,7 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) {
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -9150,7 +8881,7 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) {
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -9167,7 +8898,9 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) {
// "https://www.googleapis.com/auth/devstorage.full_control",
// "https://www.googleapis.com/auth/devstorage.read_write"
// ],
- // "supportsMediaUpload": true
+ // "supportsMediaDownload": true,
+ // "supportsMediaUpload": true,
+ // "useMediaDownloadService": true
// }
}
@@ -9201,15 +8934,6 @@ func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall {
return c
}
-// IncludeTrailingDelimiter sets the optional parameter
-// "includeTrailingDelimiter": If true, objects that end in exactly one
-// instance of delimiter will have their metadata included in items in
-// addition to prefixes.
-func (c *ObjectsListCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsListCall {
- c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter))
- return c
-}
-
// MaxResults sets the optional parameter "maxResults": Maximum number
// of items plus prefixes to return in a single page of responses. As
// duplicate prefixes are omitted, fewer total results may be returned
@@ -9247,7 +8971,7 @@ func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall {
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -9307,7 +9031,6 @@ func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) {
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
@@ -9351,7 +9074,7 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) {
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -9374,11 +9097,6 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) {
// "location": "query",
// "type": "string"
// },
- // "includeTrailingDelimiter": {
- // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.",
- // "location": "query",
- // "type": "boolean"
- // },
// "maxResults": {
// "default": "1000",
// "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.",
@@ -9411,7 +9129,7 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) {
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// },
@@ -9470,7 +9188,8 @@ type ObjectsPatchCall struct {
header_ http.Header
}
-// Patch: Patches an object's metadata.
+// Patch: Updates an object's metadata. This method supports patch
+// semantics.
func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall {
c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
@@ -9599,7 +9318,6 @@ func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) {
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PATCH", urls, body)
@@ -9644,12 +9362,12 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) {
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
- // "description": "Patches an object's metadata.",
+ // "description": "Updates an object's metadata. This method supports patch semantics.",
// "httpMethod": "PATCH",
// "id": "storage.objects.patch",
// "parameterOrder": [
@@ -9930,7 +9648,7 @@ func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRe
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *ObjectsRewriteCall) UserProject(userProject string) *ObjectsRewriteCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -9974,7 +9692,6 @@ func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) {
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
@@ -10021,7 +9738,7 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse,
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -10165,7 +9882,7 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse,
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -10216,7 +9933,7 @@ func (c *ObjectsSetIamPolicyCall) Generation(generation int64) *ObjectsSetIamPol
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *ObjectsSetIamPolicyCall) UserProject(userProject string) *ObjectsSetIamPolicyCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -10260,7 +9977,6 @@ func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error)
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PUT", urls, body)
@@ -10305,7 +10021,7 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -10337,7 +10053,7 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -10389,7 +10105,7 @@ func (c *ObjectsTestIamPermissionsCall) Generation(generation int64) *ObjectsTes
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *ObjectsTestIamPermissionsCall) UserProject(userProject string) *ObjectsTestIamPermissionsCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -10441,7 +10157,6 @@ func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam/testPermissions")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
@@ -10486,7 +10201,7 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -10526,7 +10241,7 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -10643,7 +10358,7 @@ func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall {
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *ObjectsUpdateCall) UserProject(userProject string) *ObjectsUpdateCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -10657,9 +10372,9 @@ func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall {
return c
}
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
+// Context sets the context to be used in this call's Do and Download
+// methods. Any pending HTTP request will be aborted if the provided
+// context is canceled.
func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall {
c.ctx_ = ctx
return c
@@ -10687,7 +10402,6 @@ func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) {
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PUT", urls, body)
@@ -10699,6 +10413,22 @@ func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) {
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
+// Download fetches the API endpoint's "media" value, instead of the normal
+// API response value. If the returned error is nil, the Response is guaranteed to
+// have a 2xx status code. Callers must close the Response.Body as usual.
+func (c *ObjectsUpdateCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("media")
+ if err != nil {
+ return nil, err
+ }
+ if err := googleapi.CheckMediaResponse(res); err != nil {
+ res.Body.Close()
+ return nil, err
+ }
+ return res, nil
+}
+
// Do executes the "storage.objects.update" call.
// Exactly one of *Object or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
@@ -10732,7 +10462,7 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) {
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -10822,7 +10552,7 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) {
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
@@ -10837,7 +10567,9 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) {
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
+ // ],
+ // "supportsMediaDownload": true,
+ // "useMediaDownloadService": true
// }
}
@@ -10872,15 +10604,6 @@ func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall {
return c
}
-// IncludeTrailingDelimiter sets the optional parameter
-// "includeTrailingDelimiter": If true, objects that end in exactly one
-// instance of delimiter will have their metadata included in items in
-// addition to prefixes.
-func (c *ObjectsWatchAllCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsWatchAllCall {
- c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter))
- return c
-}
-
// MaxResults sets the optional parameter "maxResults": Maximum number
// of items plus prefixes to return in a single page of responses. As
// duplicate prefixes are omitted, fewer total results may be returned
@@ -10918,7 +10641,7 @@ func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
+// be billed for this request, for Requester Pays buckets.
func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -10970,7 +10693,6 @@ func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) {
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
@@ -11014,7 +10736,7 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error)
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -11037,11 +10759,6 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error)
// "location": "query",
// "type": "string"
// },
- // "includeTrailingDelimiter": {
- // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.",
- // "location": "query",
- // "type": "boolean"
- // },
// "maxResults": {
// "default": "1000",
// "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.",
@@ -11074,7 +10791,7 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error)
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// },
@@ -11124,7 +10841,7 @@ func (r *ProjectsServiceAccountService) Get(projectId string) *ProjectsServiceAc
}
// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request.
+// be billed for this request, for Requester Pays buckets.
func (c *ProjectsServiceAccountGetCall) UserProject(userProject string) *ProjectsServiceAccountGetCall {
c.urlParams_.Set("userProject", userProject)
return c
@@ -11176,7 +10893,6 @@ func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, e
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/serviceAccount")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
@@ -11220,7 +10936,7 @@ func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*Servi
},
}
target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
@@ -11239,7 +10955,7 @@ func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*Servi
// "type": "string"
// },
// "userProject": {
- // "description": "The project to be billed for this request.",
+ // "description": "The project to be billed for this request, for Requester Pays buckets.",
// "location": "query",
// "type": "string"
// }
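
Taken together, the storage-gen.go hunks above roll the vendored Cloud Storage client back to an earlier generator output: the prettyPrint=false default and the gensupport.DecodeResponse helper go away, responses are decoded straight from res.Body with encoding/json, the includeTrailingDelimiter parameter disappears, and several object calls regain a Download variant that returns the raw media instead of JSON metadata. A minimal usage sketch of that Download path, assuming Application Default Credentials and made-up bucket/object names (storage.New and ObjectsGetCall.Download are taken to match this vendored revision):

package main

import (
	"context"
	"io/ioutil"
	"log"

	"golang.org/x/oauth2/google"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()

	// Application Default Credentials; the scope constant comes from the
	// generated storage package.
	hc, err := google.DefaultClient(ctx, storage.DevstorageReadOnlyScope)
	if err != nil {
		log.Fatal(err)
	}

	svc, err := storage.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// Download issues the request with alt=media and, per the generated
	// doc comment, guarantees a 2xx response when err is nil. The caller
	// must close the body.
	res, err := svc.Objects.Get("example-bucket", "example-object").Download()
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()

	data, err := ioutil.ReadAll(res.Body)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("downloaded %d bytes", len(data))
}
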
diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go
index 1ecbd9761..a04956d98 100644
--- a/vendor/google.golang.org/api/transport/http/dial.go
+++ b/vendor/google.golang.org/api/transport/http/dial.go
@@ -1,4 +1,4 @@
-// Copyright 2015 Google LLC
+// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -32,74 +32,43 @@ import (
// service, configured with the given ClientOptions. It also returns the endpoint
// for the service as specified in the options.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) {
- settings, err := newSettings(opts)
- if err != nil {
- return nil, "", err
- }
- // TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided?
- if settings.HTTPClient != nil {
- return settings.HTTPClient, settings.Endpoint, nil
- }
- trans, err := newTransport(ctx, defaultBaseTransport(ctx), settings)
- if err != nil {
- return nil, "", err
- }
- return &http.Client{Transport: trans}, settings.Endpoint, nil
-}
-
-// NewTransport creates an http.RoundTripper for use communicating with a Google
-// cloud service, configured with the given ClientOptions. Its RoundTrip method delegates to base.
-func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.ClientOption) (http.RoundTripper, error) {
- settings, err := newSettings(opts)
- if err != nil {
- return nil, err
- }
- if settings.HTTPClient != nil {
- return nil, errors.New("transport/http: WithHTTPClient passed to NewTransport")
- }
- return newTransport(ctx, base, settings)
-}
-
-func newTransport(ctx context.Context, base http.RoundTripper, settings *internal.DialSettings) (http.RoundTripper, error) {
- trans := base
- trans = userAgentTransport{
- base: trans,
- userAgent: settings.UserAgent,
- }
- trans = addOCTransport(trans)
- switch {
- case settings.NoAuth:
- // Do nothing.
- case settings.APIKey != "":
- trans = &transport.APIKey{
- Transport: trans,
- Key: settings.APIKey,
- }
- default:
- creds, err := internal.Creds(ctx, settings)
- if err != nil {
- return nil, err
- }
- trans = &oauth2.Transport{
- Base: trans,
- Source: creds.TokenSource,
- }
- }
- return trans, nil
-}
-
-func newSettings(opts []option.ClientOption) (*internal.DialSettings, error) {
var o internal.DialSettings
for _, opt := range opts {
opt.Apply(&o)
}
- if err := o.Validate(); err != nil {
- return nil, err
- }
if o.GRPCConn != nil {
- return nil, errors.New("unsupported gRPC connection specified")
+ return nil, "", errors.New("unsupported gRPC connection specified")
}
- return &o, nil
+ // TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided?
+ if o.HTTPClient != nil {
+ return o.HTTPClient, o.Endpoint, nil
+ }
+ if o.APIKey != "" {
+ hc := &http.Client{
+ Transport: &transport.APIKey{
+ Key: o.APIKey,
+ Transport: userAgentTransport{
+ base: baseTransport(ctx),
+ userAgent: o.UserAgent,
+ },
+ },
+ }
+ return hc, o.Endpoint, nil
+ }
+ creds, err := internal.Creds(ctx, &o)
+ if err != nil {
+ return nil, "", err
+ }
+ hc := &http.Client{
+ Transport: &oauth2.Transport{
+ Source: creds.TokenSource,
+ Base: userAgentTransport{
+ base: baseTransport(ctx),
+ userAgent: o.UserAgent,
+ },
+ },
+ }
+ return hc, o.Endpoint, nil
}
type userAgentTransport struct {
@@ -128,9 +97,9 @@ func (t userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error)
// Set at init time by dial_appengine.go. If nil, we're not on App Engine.
var appengineUrlfetchHook func(context.Context) http.RoundTripper
-// defaultBaseTransport returns the base HTTP transport.
+// baseTransport returns the base HTTP transport.
// On App Engine, this is urlfetch.Transport, otherwise it's http.DefaultTransport.
-func defaultBaseTransport(ctx context.Context) http.RoundTripper {
+func baseTransport(ctx context.Context) http.RoundTripper {
if appengineUrlfetchHook != nil {
return appengineUrlfetchHook(ctx)
}
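
The dial.go revert above folds transport construction back into NewClient itself: the explicit HTTP client and API-key branches are handled inline, everything else falls through to internal.Creds plus an oauth2.Transport, the user-agent wrapper sits directly over baseTransport, and the separate NewTransport/newSettings/addOCTransport layering (and with it the OpenCensus hook) is gone. A short sketch of calling this reverted NewClient with an API key; the key and endpoint values are placeholders, and only the function signature comes from the code above:

package main

import (
	"context"
	"log"

	"google.golang.org/api/option"
	htransport "google.golang.org/api/transport/http"
)

func main() {
	ctx := context.Background()

	// With an API key set, NewClient wires transport.APIKey around the
	// user-agent transport instead of fetching OAuth credentials.
	hc, endpoint, err := htransport.NewClient(ctx,
		option.WithAPIKey("example-api-key"),
		option.WithEndpoint("https://www.googleapis.com/storage/v1/"),
	)
	if err != nil {
		log.Fatal(err)
	}

	log.Printf("client ready: %v, endpoint: %s", hc != nil, endpoint)
}
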
diff --git a/vendor/google.golang.org/api/transport/http/dial_appengine.go b/vendor/google.golang.org/api/transport/http/dial_appengine.go
index 313e0bd5b..0cdef74ac 100644
--- a/vendor/google.golang.org/api/transport/http/dial_appengine.go
+++ b/vendor/google.golang.org/api/transport/http/dial_appengine.go
@@ -1,4 +1,4 @@
-// Copyright 2016 Google LLC
+// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/vendor/google.golang.org/api/transport/http/go18.go b/vendor/google.golang.org/api/transport/http/go18.go
deleted file mode 100644
index b5eafbb4e..000000000
--- a/vendor/google.golang.org/api/transport/http/go18.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build go1.8
-
-package http
-
-import (
- "net/http"
-
- "contrib.go.opencensus.io/exporter/stackdriver/propagation"
- "go.opencensus.io/plugin/ochttp"
-)
-
-func addOCTransport(trans http.RoundTripper) http.RoundTripper {
- return &ochttp.Transport{
- Base: trans,
- Propagation: &propagation.HTTPFormat{},
- }
-}
diff --git a/vendor/google.golang.org/api/transport/http/not_go18.go b/vendor/google.golang.org/api/transport/http/not_go18.go
deleted file mode 100644
index b8e1abe92..000000000
--- a/vendor/google.golang.org/api/transport/http/not_go18.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !go1.8
-
-package http
-
-import "net/http"
-
-func addOCTransport(trans http.RoundTripper) http.RoundTripper { return trans }
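
The two files deleted above were a build-constraint pair: exactly one definition of addOCTransport compiled into any build, the OpenCensus ochttp wrapper on Go 1.8+ and a pass-through otherwise, so removing both drops tracing from the transport entirely. A minimal sketch of that pattern with hypothetical file and function names; the instrumented sibling file would carry the inverse // +build go1.8 tag and return a wrapping RoundTripper instead:

// +build !go1.8

// tracing_fallback.go (hypothetical): on older Go the wrapper is a no-op,
// so callers can unconditionally write trans = wrapTracing(trans).
package transportx

import "net/http"

func wrapTracing(base http.RoundTripper) http.RoundTripper { return base }
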
diff --git a/vendor/google.golang.org/appengine/go.mod b/vendor/google.golang.org/appengine/go.mod
new file mode 100644
index 000000000..f449359d2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/go.mod
@@ -0,0 +1,7 @@
+module google.golang.org/appengine
+
+require (
+ github.com/golang/protobuf v1.2.0
+ golang.org/x/net v0.0.0-20180724234803-3673e40ba225
+ golang.org/x/text v0.3.0
+)
diff --git a/vendor/google.golang.org/appengine/go.sum b/vendor/google.golang.org/appengine/go.sum
new file mode 100644
index 000000000..5e644c2e9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/go.sum
@@ -0,0 +1,3 @@
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
index 89d3ea9c2..9a2ff77ab 100644
--- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
+++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
@@ -1,26 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto
-/*
-Package app_identity is a generated protocol buffer package.
-
-It is generated from these files:
- google.golang.org/appengine/internal/app_identity/app_identity_service.proto
-
-It has these top-level messages:
- AppIdentityServiceError
- SignForAppRequest
- SignForAppResponse
- GetPublicCertificateForAppRequest
- PublicCertificate
- GetPublicCertificateForAppResponse
- GetServiceAccountNameRequest
- GetServiceAccountNameResponse
- GetAccessTokenRequest
- GetAccessTokenResponse
- GetDefaultGcsBucketNameRequest
- GetDefaultGcsBucketNameResponse
-*/
package app_identity
import proto "github.com/golang/protobuf/proto"
@@ -89,27 +69,69 @@ func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
return nil
}
func (AppIdentityServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor0, []int{0, 0}
+ return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0, 0}
}
type AppIdentityServiceError struct {
- XXX_unrecognized []byte `json:"-"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} }
-func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) }
-func (*AppIdentityServiceError) ProtoMessage() {}
-func (*AppIdentityServiceError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} }
+func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) }
+func (*AppIdentityServiceError) ProtoMessage() {}
+func (*AppIdentityServiceError) Descriptor() ([]byte, []int) {
+ return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0}
+}
+func (m *AppIdentityServiceError) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AppIdentityServiceError.Unmarshal(m, b)
+}
+func (m *AppIdentityServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AppIdentityServiceError.Marshal(b, m, deterministic)
+}
+func (dst *AppIdentityServiceError) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AppIdentityServiceError.Merge(dst, src)
+}
+func (m *AppIdentityServiceError) XXX_Size() int {
+ return xxx_messageInfo_AppIdentityServiceError.Size(m)
+}
+func (m *AppIdentityServiceError) XXX_DiscardUnknown() {
+ xxx_messageInfo_AppIdentityServiceError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AppIdentityServiceError proto.InternalMessageInfo
type SignForAppRequest struct {
- BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign,json=bytesToSign" json:"bytes_to_sign,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign,json=bytesToSign" json:"bytes_to_sign,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} }
-func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) }
-func (*SignForAppRequest) ProtoMessage() {}
-func (*SignForAppRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} }
+func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) }
+func (*SignForAppRequest) ProtoMessage() {}
+func (*SignForAppRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{1}
+}
+func (m *SignForAppRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SignForAppRequest.Unmarshal(m, b)
+}
+func (m *SignForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SignForAppRequest.Marshal(b, m, deterministic)
+}
+func (dst *SignForAppRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SignForAppRequest.Merge(dst, src)
+}
+func (m *SignForAppRequest) XXX_Size() int {
+ return xxx_messageInfo_SignForAppRequest.Size(m)
+}
+func (m *SignForAppRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SignForAppRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SignForAppRequest proto.InternalMessageInfo
func (m *SignForAppRequest) GetBytesToSign() []byte {
if m != nil {
@@ -119,15 +141,36 @@ func (m *SignForAppRequest) GetBytesToSign() []byte {
}
type SignForAppResponse struct {
- KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"`
- SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes,json=signatureBytes" json:"signature_bytes,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"`
+ SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes,json=signatureBytes" json:"signature_bytes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} }
-func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) }
-func (*SignForAppResponse) ProtoMessage() {}
-func (*SignForAppResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} }
+func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) }
+func (*SignForAppResponse) ProtoMessage() {}
+func (*SignForAppResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{2}
+}
+func (m *SignForAppResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SignForAppResponse.Unmarshal(m, b)
+}
+func (m *SignForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SignForAppResponse.Marshal(b, m, deterministic)
+}
+func (dst *SignForAppResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SignForAppResponse.Merge(dst, src)
+}
+func (m *SignForAppResponse) XXX_Size() int {
+ return xxx_messageInfo_SignForAppResponse.Size(m)
+}
+func (m *SignForAppResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_SignForAppResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SignForAppResponse proto.InternalMessageInfo
func (m *SignForAppResponse) GetKeyName() string {
if m != nil && m.KeyName != nil {
@@ -144,26 +187,66 @@ func (m *SignForAppResponse) GetSignatureBytes() []byte {
}
type GetPublicCertificateForAppRequest struct {
- XXX_unrecognized []byte `json:"-"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} }
func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) }
func (*GetPublicCertificateForAppRequest) ProtoMessage() {}
func (*GetPublicCertificateForAppRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{3}
+ return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{3}
}
+func (m *GetPublicCertificateForAppRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetPublicCertificateForAppRequest.Unmarshal(m, b)
+}
+func (m *GetPublicCertificateForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetPublicCertificateForAppRequest.Marshal(b, m, deterministic)
+}
+func (dst *GetPublicCertificateForAppRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetPublicCertificateForAppRequest.Merge(dst, src)
+}
+func (m *GetPublicCertificateForAppRequest) XXX_Size() int {
+ return xxx_messageInfo_GetPublicCertificateForAppRequest.Size(m)
+}
+func (m *GetPublicCertificateForAppRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetPublicCertificateForAppRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetPublicCertificateForAppRequest proto.InternalMessageInfo
type PublicCertificate struct {
- KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"`
- X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem,json=x509CertificatePem" json:"x509_certificate_pem,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"`
+ X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem,json=x509CertificatePem" json:"x509_certificate_pem,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *PublicCertificate) Reset() { *m = PublicCertificate{} }
-func (m *PublicCertificate) String() string { return proto.CompactTextString(m) }
-func (*PublicCertificate) ProtoMessage() {}
-func (*PublicCertificate) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+func (m *PublicCertificate) Reset() { *m = PublicCertificate{} }
+func (m *PublicCertificate) String() string { return proto.CompactTextString(m) }
+func (*PublicCertificate) ProtoMessage() {}
+func (*PublicCertificate) Descriptor() ([]byte, []int) {
+ return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{4}
+}
+func (m *PublicCertificate) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PublicCertificate.Unmarshal(m, b)
+}
+func (m *PublicCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PublicCertificate.Marshal(b, m, deterministic)
+}
+func (dst *PublicCertificate) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PublicCertificate.Merge(dst, src)
+}
+func (m *PublicCertificate) XXX_Size() int {
+ return xxx_messageInfo_PublicCertificate.Size(m)
+}
+func (m *PublicCertificate) XXX_DiscardUnknown() {
+ xxx_messageInfo_PublicCertificate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PublicCertificate proto.InternalMessageInfo
func (m *PublicCertificate) GetKeyName() string {
if m != nil && m.KeyName != nil {
@@ -182,15 +265,34 @@ func (m *PublicCertificate) GetX509CertificatePem() string {
type GetPublicCertificateForAppResponse struct {
PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list,json=publicCertificateList" json:"public_certificate_list,omitempty"`
MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second,json=maxClientCacheTimeInSecond" json:"max_client_cache_time_in_second,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} }
func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) }
func (*GetPublicCertificateForAppResponse) ProtoMessage() {}
func (*GetPublicCertificateForAppResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{5}
+ return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{5}
}
+func (m *GetPublicCertificateForAppResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetPublicCertificateForAppResponse.Unmarshal(m, b)
+}
+func (m *GetPublicCertificateForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetPublicCertificateForAppResponse.Marshal(b, m, deterministic)
+}
+func (dst *GetPublicCertificateForAppResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetPublicCertificateForAppResponse.Merge(dst, src)
+}
+func (m *GetPublicCertificateForAppResponse) XXX_Size() int {
+ return xxx_messageInfo_GetPublicCertificateForAppResponse.Size(m)
+}
+func (m *GetPublicCertificateForAppResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetPublicCertificateForAppResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetPublicCertificateForAppResponse proto.InternalMessageInfo
func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate {
if m != nil {
@@ -207,23 +309,65 @@ func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int
}
type GetServiceAccountNameRequest struct {
- XXX_unrecognized []byte `json:"-"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} }
-func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) }
-func (*GetServiceAccountNameRequest) ProtoMessage() {}
-func (*GetServiceAccountNameRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} }
+func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetServiceAccountNameRequest) ProtoMessage() {}
+func (*GetServiceAccountNameRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{6}
+}
+func (m *GetServiceAccountNameRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetServiceAccountNameRequest.Unmarshal(m, b)
+}
+func (m *GetServiceAccountNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetServiceAccountNameRequest.Marshal(b, m, deterministic)
+}
+func (dst *GetServiceAccountNameRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetServiceAccountNameRequest.Merge(dst, src)
+}
+func (m *GetServiceAccountNameRequest) XXX_Size() int {
+ return xxx_messageInfo_GetServiceAccountNameRequest.Size(m)
+}
+func (m *GetServiceAccountNameRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetServiceAccountNameRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetServiceAccountNameRequest proto.InternalMessageInfo
type GetServiceAccountNameResponse struct {
- ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} }
-func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) }
-func (*GetServiceAccountNameResponse) ProtoMessage() {}
-func (*GetServiceAccountNameResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} }
+func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetServiceAccountNameResponse) ProtoMessage() {}
+func (*GetServiceAccountNameResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{7}
+}
+func (m *GetServiceAccountNameResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetServiceAccountNameResponse.Unmarshal(m, b)
+}
+func (m *GetServiceAccountNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetServiceAccountNameResponse.Marshal(b, m, deterministic)
+}
+func (dst *GetServiceAccountNameResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetServiceAccountNameResponse.Merge(dst, src)
+}
+func (m *GetServiceAccountNameResponse) XXX_Size() int {
+ return xxx_messageInfo_GetServiceAccountNameResponse.Size(m)
+}
+func (m *GetServiceAccountNameResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetServiceAccountNameResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetServiceAccountNameResponse proto.InternalMessageInfo
func (m *GetServiceAccountNameResponse) GetServiceAccountName() string {
if m != nil && m.ServiceAccountName != nil {
@@ -233,16 +377,37 @@ func (m *GetServiceAccountNameResponse) GetServiceAccountName() string {
}
type GetAccessTokenRequest struct {
- Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"`
- ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id,json=serviceAccountId" json:"service_account_id,omitempty"`
- ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"`
+ ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id,json=serviceAccountId" json:"service_account_id,omitempty"`
+ ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} }
-func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) }
-func (*GetAccessTokenRequest) ProtoMessage() {}
-func (*GetAccessTokenRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} }
+func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) }
+func (*GetAccessTokenRequest) ProtoMessage() {}
+func (*GetAccessTokenRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{8}
+}
+func (m *GetAccessTokenRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetAccessTokenRequest.Unmarshal(m, b)
+}
+func (m *GetAccessTokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetAccessTokenRequest.Marshal(b, m, deterministic)
+}
+func (dst *GetAccessTokenRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetAccessTokenRequest.Merge(dst, src)
+}
+func (m *GetAccessTokenRequest) XXX_Size() int {
+ return xxx_messageInfo_GetAccessTokenRequest.Size(m)
+}
+func (m *GetAccessTokenRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetAccessTokenRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetAccessTokenRequest proto.InternalMessageInfo
func (m *GetAccessTokenRequest) GetScope() []string {
if m != nil {
@@ -266,15 +431,36 @@ func (m *GetAccessTokenRequest) GetServiceAccountName() string {
}
type GetAccessTokenResponse struct {
- AccessToken *string `protobuf:"bytes,1,opt,name=access_token,json=accessToken" json:"access_token,omitempty"`
- ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time,json=expirationTime" json:"expiration_time,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ AccessToken *string `protobuf:"bytes,1,opt,name=access_token,json=accessToken" json:"access_token,omitempty"`
+ ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time,json=expirationTime" json:"expiration_time,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} }
-func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) }
-func (*GetAccessTokenResponse) ProtoMessage() {}
-func (*GetAccessTokenResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} }
+func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) }
+func (*GetAccessTokenResponse) ProtoMessage() {}
+func (*GetAccessTokenResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{9}
+}
+func (m *GetAccessTokenResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetAccessTokenResponse.Unmarshal(m, b)
+}
+func (m *GetAccessTokenResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetAccessTokenResponse.Marshal(b, m, deterministic)
+}
+func (dst *GetAccessTokenResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetAccessTokenResponse.Merge(dst, src)
+}
+func (m *GetAccessTokenResponse) XXX_Size() int {
+ return xxx_messageInfo_GetAccessTokenResponse.Size(m)
+}
+func (m *GetAccessTokenResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetAccessTokenResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetAccessTokenResponse proto.InternalMessageInfo
func (m *GetAccessTokenResponse) GetAccessToken() string {
if m != nil && m.AccessToken != nil {
@@ -291,25 +477,65 @@ func (m *GetAccessTokenResponse) GetExpirationTime() int64 {
}
type GetDefaultGcsBucketNameRequest struct {
- XXX_unrecognized []byte `json:"-"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} }
-func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) }
-func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {}
-func (*GetDefaultGcsBucketNameRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} }
+func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {}
+func (*GetDefaultGcsBucketNameRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{10}
+}
+func (m *GetDefaultGcsBucketNameRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Unmarshal(m, b)
+}
+func (m *GetDefaultGcsBucketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Marshal(b, m, deterministic)
+}
+func (dst *GetDefaultGcsBucketNameRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetDefaultGcsBucketNameRequest.Merge(dst, src)
+}
+func (m *GetDefaultGcsBucketNameRequest) XXX_Size() int {
+ return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Size(m)
+}
+func (m *GetDefaultGcsBucketNameRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetDefaultGcsBucketNameRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetDefaultGcsBucketNameRequest proto.InternalMessageInfo
type GetDefaultGcsBucketNameResponse struct {
- DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name,json=defaultGcsBucketName" json:"default_gcs_bucket_name,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name,json=defaultGcsBucketName" json:"default_gcs_bucket_name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} }
func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) }
func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {}
func (*GetDefaultGcsBucketNameResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{11}
+ return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{11}
}
+func (m *GetDefaultGcsBucketNameResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Unmarshal(m, b)
+}
+func (m *GetDefaultGcsBucketNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Marshal(b, m, deterministic)
+}
+func (dst *GetDefaultGcsBucketNameResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetDefaultGcsBucketNameResponse.Merge(dst, src)
+}
+func (m *GetDefaultGcsBucketNameResponse) XXX_Size() int {
+ return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Size(m)
+}
+func (m *GetDefaultGcsBucketNameResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetDefaultGcsBucketNameResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetDefaultGcsBucketNameResponse proto.InternalMessageInfo
func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string {
if m != nil && m.DefaultGcsBucketName != nil {
@@ -334,10 +560,10 @@ func init() {
}
func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/app_identity/app_identity_service.proto", fileDescriptor0)
+ proto.RegisterFile("google.golang.org/appengine/internal/app_identity/app_identity_service.proto", fileDescriptor_app_identity_service_08a6e3f74b04cfa4)
}
-var fileDescriptor0 = []byte{
+var fileDescriptor_app_identity_service_08a6e3f74b04cfa4 = []byte{
// 676 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xdb, 0x6e, 0xda, 0x58,
0x14, 0x1d, 0x26, 0x1a, 0x31, 0x6c, 0x12, 0x62, 0xce, 0x90, 0xcb, 0x8c, 0x32, 0xb9, 0x78, 0x1e,
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
index 6205a7aee..db4777e68 100644
--- a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
@@ -1,21 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google.golang.org/appengine/internal/base/api_base.proto
-/*
-Package base is a generated protocol buffer package.
-
-It is generated from these files:
- google.golang.org/appengine/internal/base/api_base.proto
-
-It has these top-level messages:
- StringProto
- Integer32Proto
- Integer64Proto
- BoolProto
- DoubleProto
- BytesProto
- VoidProto
-*/
package base
import proto "github.com/golang/protobuf/proto"
@@ -34,14 +19,35 @@ var _ = math.Inf
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type StringProto struct {
- Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *StringProto) Reset() { *m = StringProto{} }
-func (m *StringProto) String() string { return proto.CompactTextString(m) }
-func (*StringProto) ProtoMessage() {}
-func (*StringProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (m *StringProto) Reset() { *m = StringProto{} }
+func (m *StringProto) String() string { return proto.CompactTextString(m) }
+func (*StringProto) ProtoMessage() {}
+func (*StringProto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{0}
+}
+func (m *StringProto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StringProto.Unmarshal(m, b)
+}
+func (m *StringProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StringProto.Marshal(b, m, deterministic)
+}
+func (dst *StringProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StringProto.Merge(dst, src)
+}
+func (m *StringProto) XXX_Size() int {
+ return xxx_messageInfo_StringProto.Size(m)
+}
+func (m *StringProto) XXX_DiscardUnknown() {
+ xxx_messageInfo_StringProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StringProto proto.InternalMessageInfo
func (m *StringProto) GetValue() string {
if m != nil && m.Value != nil {
@@ -51,14 +57,35 @@ func (m *StringProto) GetValue() string {
}
type Integer32Proto struct {
- Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Integer32Proto) Reset() { *m = Integer32Proto{} }
-func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
-func (*Integer32Proto) ProtoMessage() {}
-func (*Integer32Proto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (m *Integer32Proto) Reset() { *m = Integer32Proto{} }
+func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer32Proto) ProtoMessage() {}
+func (*Integer32Proto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{1}
+}
+func (m *Integer32Proto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Integer32Proto.Unmarshal(m, b)
+}
+func (m *Integer32Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Integer32Proto.Marshal(b, m, deterministic)
+}
+func (dst *Integer32Proto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Integer32Proto.Merge(dst, src)
+}
+func (m *Integer32Proto) XXX_Size() int {
+ return xxx_messageInfo_Integer32Proto.Size(m)
+}
+func (m *Integer32Proto) XXX_DiscardUnknown() {
+ xxx_messageInfo_Integer32Proto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Integer32Proto proto.InternalMessageInfo
func (m *Integer32Proto) GetValue() int32 {
if m != nil && m.Value != nil {
@@ -68,14 +95,35 @@ func (m *Integer32Proto) GetValue() int32 {
}
type Integer64Proto struct {
- Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Integer64Proto) Reset() { *m = Integer64Proto{} }
-func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
-func (*Integer64Proto) ProtoMessage() {}
-func (*Integer64Proto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (m *Integer64Proto) Reset() { *m = Integer64Proto{} }
+func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer64Proto) ProtoMessage() {}
+func (*Integer64Proto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{2}
+}
+func (m *Integer64Proto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Integer64Proto.Unmarshal(m, b)
+}
+func (m *Integer64Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Integer64Proto.Marshal(b, m, deterministic)
+}
+func (dst *Integer64Proto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Integer64Proto.Merge(dst, src)
+}
+func (m *Integer64Proto) XXX_Size() int {
+ return xxx_messageInfo_Integer64Proto.Size(m)
+}
+func (m *Integer64Proto) XXX_DiscardUnknown() {
+ xxx_messageInfo_Integer64Proto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Integer64Proto proto.InternalMessageInfo
func (m *Integer64Proto) GetValue() int64 {
if m != nil && m.Value != nil {
@@ -85,14 +133,35 @@ func (m *Integer64Proto) GetValue() int64 {
}
type BoolProto struct {
- Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *BoolProto) Reset() { *m = BoolProto{} }
-func (m *BoolProto) String() string { return proto.CompactTextString(m) }
-func (*BoolProto) ProtoMessage() {}
-func (*BoolProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+func (m *BoolProto) Reset() { *m = BoolProto{} }
+func (m *BoolProto) String() string { return proto.CompactTextString(m) }
+func (*BoolProto) ProtoMessage() {}
+func (*BoolProto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{3}
+}
+func (m *BoolProto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_BoolProto.Unmarshal(m, b)
+}
+func (m *BoolProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_BoolProto.Marshal(b, m, deterministic)
+}
+func (dst *BoolProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BoolProto.Merge(dst, src)
+}
+func (m *BoolProto) XXX_Size() int {
+ return xxx_messageInfo_BoolProto.Size(m)
+}
+func (m *BoolProto) XXX_DiscardUnknown() {
+ xxx_messageInfo_BoolProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BoolProto proto.InternalMessageInfo
func (m *BoolProto) GetValue() bool {
if m != nil && m.Value != nil {
@@ -102,14 +171,35 @@ func (m *BoolProto) GetValue() bool {
}
type DoubleProto struct {
- Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *DoubleProto) Reset() { *m = DoubleProto{} }
-func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
-func (*DoubleProto) ProtoMessage() {}
-func (*DoubleProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+func (m *DoubleProto) Reset() { *m = DoubleProto{} }
+func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
+func (*DoubleProto) ProtoMessage() {}
+func (*DoubleProto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{4}
+}
+func (m *DoubleProto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DoubleProto.Unmarshal(m, b)
+}
+func (m *DoubleProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DoubleProto.Marshal(b, m, deterministic)
+}
+func (dst *DoubleProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DoubleProto.Merge(dst, src)
+}
+func (m *DoubleProto) XXX_Size() int {
+ return xxx_messageInfo_DoubleProto.Size(m)
+}
+func (m *DoubleProto) XXX_DiscardUnknown() {
+ xxx_messageInfo_DoubleProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DoubleProto proto.InternalMessageInfo
func (m *DoubleProto) GetValue() float64 {
if m != nil && m.Value != nil {
@@ -119,14 +209,35 @@ func (m *DoubleProto) GetValue() float64 {
}
type BytesProto struct {
- Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *BytesProto) Reset() { *m = BytesProto{} }
-func (m *BytesProto) String() string { return proto.CompactTextString(m) }
-func (*BytesProto) ProtoMessage() {}
-func (*BytesProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+func (m *BytesProto) Reset() { *m = BytesProto{} }
+func (m *BytesProto) String() string { return proto.CompactTextString(m) }
+func (*BytesProto) ProtoMessage() {}
+func (*BytesProto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{5}
+}
+func (m *BytesProto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_BytesProto.Unmarshal(m, b)
+}
+func (m *BytesProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_BytesProto.Marshal(b, m, deterministic)
+}
+func (dst *BytesProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BytesProto.Merge(dst, src)
+}
+func (m *BytesProto) XXX_Size() int {
+ return xxx_messageInfo_BytesProto.Size(m)
+}
+func (m *BytesProto) XXX_DiscardUnknown() {
+ xxx_messageInfo_BytesProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BytesProto proto.InternalMessageInfo
func (m *BytesProto) GetValue() []byte {
if m != nil {
@@ -136,13 +247,34 @@ func (m *BytesProto) GetValue() []byte {
}
type VoidProto struct {
- XXX_unrecognized []byte `json:"-"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *VoidProto) Reset() { *m = VoidProto{} }
-func (m *VoidProto) String() string { return proto.CompactTextString(m) }
-func (*VoidProto) ProtoMessage() {}
-func (*VoidProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (m *VoidProto) Reset() { *m = VoidProto{} }
+func (m *VoidProto) String() string { return proto.CompactTextString(m) }
+func (*VoidProto) ProtoMessage() {}
+func (*VoidProto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{6}
+}
+func (m *VoidProto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_VoidProto.Unmarshal(m, b)
+}
+func (m *VoidProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_VoidProto.Marshal(b, m, deterministic)
+}
+func (dst *VoidProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_VoidProto.Merge(dst, src)
+}
+func (m *VoidProto) XXX_Size() int {
+ return xxx_messageInfo_VoidProto.Size(m)
+}
+func (m *VoidProto) XXX_DiscardUnknown() {
+ xxx_messageInfo_VoidProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VoidProto proto.InternalMessageInfo
func init() {
proto.RegisterType((*StringProto)(nil), "appengine.base.StringProto")
@@ -155,10 +287,10 @@ func init() {
}
func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/base/api_base.proto", fileDescriptor0)
+ proto.RegisterFile("google.golang.org/appengine/internal/base/api_base.proto", fileDescriptor_api_base_9d49f8792e0c1140)
}
-var fileDescriptor0 = []byte{
+var fileDescriptor_api_base_9d49f8792e0c1140 = []byte{
// 199 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xcf, 0x3f, 0x4b, 0xc6, 0x30,
0x10, 0x06, 0x70, 0x5a, 0xad, 0xb4, 0x57, 0xe9, 0x20, 0x0e, 0x1d, 0xb5, 0x05, 0x71, 0x4a, 0x40,
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
index 393342c13..2fb748289 100644
--- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
@@ -1,52 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto
-/*
-Package datastore is a generated protocol buffer package.
-
-It is generated from these files:
- google.golang.org/appengine/internal/datastore/datastore_v3.proto
-
-It has these top-level messages:
- Action
- PropertyValue
- Property
- Path
- Reference
- User
- EntityProto
- CompositeProperty
- Index
- CompositeIndex
- IndexPostfix
- IndexPosition
- Snapshot
- InternalHeader
- Transaction
- Query
- CompiledQuery
- CompiledCursor
- Cursor
- Error
- Cost
- GetRequest
- GetResponse
- PutRequest
- PutResponse
- TouchRequest
- TouchResponse
- DeleteRequest
- DeleteResponse
- NextRequest
- QueryResult
- AllocateIdsRequest
- AllocateIdsResponse
- CompositeIndices
- AddActionsRequest
- AddActionsResponse
- BeginTransactionRequest
- CommitResponse
-*/
package datastore
import proto "github.com/golang/protobuf/proto"
@@ -150,7 +104,9 @@ func (x *Property_Meaning) UnmarshalJSON(data []byte) error {
*x = Property_Meaning(value)
return nil
}
-func (Property_Meaning) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
+func (Property_Meaning) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 0}
+}
type Property_FtsTokenizationOption int32
@@ -185,7 +141,7 @@ func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error {
return nil
}
func (Property_FtsTokenizationOption) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor0, []int{2, 1}
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 1}
}
type EntityProto_Kind int32
@@ -223,7 +179,9 @@ func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error {
*x = EntityProto_Kind(value)
return nil
}
-func (EntityProto_Kind) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{6, 0} }
+func (EntityProto_Kind) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6, 0}
+}
type Index_Property_Direction int32
@@ -258,7 +216,7 @@ func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error {
return nil
}
func (Index_Property_Direction) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor0, []int{8, 0, 0}
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0, 0}
}
type CompositeIndex_State int32
@@ -299,7 +257,9 @@ func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error {
*x = CompositeIndex_State(value)
return nil
}
-func (CompositeIndex_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} }
+func (CompositeIndex_State) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9, 0}
+}
type Snapshot_Status int32
@@ -333,7 +293,9 @@ func (x *Snapshot_Status) UnmarshalJSON(data []byte) error {
*x = Snapshot_Status(value)
return nil
}
-func (Snapshot_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} }
+func (Snapshot_Status) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12, 0}
+}
type Query_Hint int32
@@ -370,7 +332,9 @@ func (x *Query_Hint) UnmarshalJSON(data []byte) error {
*x = Query_Hint(value)
return nil
}
-func (Query_Hint) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{15, 0} }
+func (Query_Hint) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0}
+}
type Query_Filter_Operator int32
@@ -419,7 +383,9 @@ func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error {
*x = Query_Filter_Operator(value)
return nil
}
-func (Query_Filter_Operator) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{15, 0, 0} }
+func (Query_Filter_Operator) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0, 0}
+}
type Query_Order_Direction int32
@@ -453,7 +419,9 @@ func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error {
*x = Query_Order_Direction(value)
return nil
}
-func (Query_Order_Direction) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{15, 1, 0} }
+func (Query_Order_Direction) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1, 0}
+}
type Error_ErrorCode int32
@@ -514,7 +482,9 @@ func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error {
*x = Error_ErrorCode(value)
return nil
}
-func (Error_ErrorCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} }
+func (Error_ErrorCode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19, 0}
+}
type PutRequest_AutoIdPolicy int32
@@ -548,7 +518,9 @@ func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error {
*x = PutRequest_AutoIdPolicy(value)
return nil
}
-func (PutRequest_AutoIdPolicy) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{23, 0} }
+func (PutRequest_AutoIdPolicy) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23, 0}
+}
type BeginTransactionRequest_TransactionMode int32
@@ -586,33 +558,75 @@ func (x *BeginTransactionRequest_TransactionMode) UnmarshalJSON(data []byte) err
return nil
}
func (BeginTransactionRequest_TransactionMode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor0, []int{36, 0}
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36, 0}
}
type Action struct {
- XXX_unrecognized []byte `json:"-"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Action) Reset() { *m = Action{} }
-func (m *Action) String() string { return proto.CompactTextString(m) }
-func (*Action) ProtoMessage() {}
-func (*Action) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (m *Action) Reset() { *m = Action{} }
+func (m *Action) String() string { return proto.CompactTextString(m) }
+func (*Action) ProtoMessage() {}
+func (*Action) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{0}
+}
+func (m *Action) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Action.Unmarshal(m, b)
+}
+func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Action.Marshal(b, m, deterministic)
+}
+func (dst *Action) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Action.Merge(dst, src)
+}
+func (m *Action) XXX_Size() int {
+ return xxx_messageInfo_Action.Size(m)
+}
+func (m *Action) XXX_DiscardUnknown() {
+ xxx_messageInfo_Action.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Action proto.InternalMessageInfo
type PropertyValue struct {
- Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"`
- BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"`
- StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"`
- DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"`
- Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue,json=pointvalue" json:"pointvalue,omitempty"`
- Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue,json=uservalue" json:"uservalue,omitempty"`
- Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue,json=referencevalue" json:"referencevalue,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"`
+ BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"`
+ StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"`
+ DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"`
+ Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue,json=pointvalue" json:"pointvalue,omitempty"`
+ Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue,json=uservalue" json:"uservalue,omitempty"`
+ Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue,json=referencevalue" json:"referencevalue,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *PropertyValue) Reset() { *m = PropertyValue{} }
-func (m *PropertyValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue) ProtoMessage() {}
-func (*PropertyValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (m *PropertyValue) Reset() { *m = PropertyValue{} }
+func (m *PropertyValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue) ProtoMessage() {}
+func (*PropertyValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1}
+}
+func (m *PropertyValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PropertyValue.Unmarshal(m, b)
+}
+func (m *PropertyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PropertyValue.Marshal(b, m, deterministic)
+}
+func (dst *PropertyValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PropertyValue.Merge(dst, src)
+}
+func (m *PropertyValue) XXX_Size() int {
+ return xxx_messageInfo_PropertyValue.Size(m)
+}
+func (m *PropertyValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_PropertyValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PropertyValue proto.InternalMessageInfo
func (m *PropertyValue) GetInt64Value() int64 {
if m != nil && m.Int64Value != nil {
@@ -664,15 +678,36 @@ func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue {
}
type PropertyValue_PointValue struct {
- X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"`
- Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"`
+ Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} }
-func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_PointValue) ProtoMessage() {}
-func (*PropertyValue_PointValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} }
+func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} }
+func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_PointValue) ProtoMessage() {}
+func (*PropertyValue_PointValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 0}
+}
+func (m *PropertyValue_PointValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PropertyValue_PointValue.Unmarshal(m, b)
+}
+func (m *PropertyValue_PointValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PropertyValue_PointValue.Marshal(b, m, deterministic)
+}
+func (dst *PropertyValue_PointValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PropertyValue_PointValue.Merge(dst, src)
+}
+func (m *PropertyValue_PointValue) XXX_Size() int {
+ return xxx_messageInfo_PropertyValue_PointValue.Size(m)
+}
+func (m *PropertyValue_PointValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_PropertyValue_PointValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PropertyValue_PointValue proto.InternalMessageInfo
func (m *PropertyValue_PointValue) GetX() float64 {
if m != nil && m.X != nil {
@@ -689,18 +724,39 @@ func (m *PropertyValue_PointValue) GetY() float64 {
}
type PropertyValue_UserValue struct {
- Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"`
- AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"`
- Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"`
- FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"`
- FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"`
+ AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"`
+ Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"`
+ FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} }
-func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_UserValue) ProtoMessage() {}
-func (*PropertyValue_UserValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 1} }
+func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} }
+func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_UserValue) ProtoMessage() {}
+func (*PropertyValue_UserValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 1}
+}
+func (m *PropertyValue_UserValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PropertyValue_UserValue.Unmarshal(m, b)
+}
+func (m *PropertyValue_UserValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PropertyValue_UserValue.Marshal(b, m, deterministic)
+}
+func (dst *PropertyValue_UserValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PropertyValue_UserValue.Merge(dst, src)
+}
+func (m *PropertyValue_UserValue) XXX_Size() int {
+ return xxx_messageInfo_PropertyValue_UserValue.Size(m)
+}
+func (m *PropertyValue_UserValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_PropertyValue_UserValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PropertyValue_UserValue proto.InternalMessageInfo
func (m *PropertyValue_UserValue) GetEmail() string {
if m != nil && m.Email != nil {
@@ -738,16 +794,37 @@ func (m *PropertyValue_UserValue) GetFederatedProvider() string {
}
type PropertyValue_ReferenceValue struct {
- App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
- NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
- Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement,json=pathelement" json:"pathelement,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
+ Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement,json=pathelement" json:"pathelement,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} }
-func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_ReferenceValue) ProtoMessage() {}
-func (*PropertyValue_ReferenceValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 2} }
+func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} }
+func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue) ProtoMessage() {}
+func (*PropertyValue_ReferenceValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2}
+}
+func (m *PropertyValue_ReferenceValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PropertyValue_ReferenceValue.Unmarshal(m, b)
+}
+func (m *PropertyValue_ReferenceValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PropertyValue_ReferenceValue.Marshal(b, m, deterministic)
+}
+func (dst *PropertyValue_ReferenceValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PropertyValue_ReferenceValue.Merge(dst, src)
+}
+func (m *PropertyValue_ReferenceValue) XXX_Size() int {
+ return xxx_messageInfo_PropertyValue_ReferenceValue.Size(m)
+}
+func (m *PropertyValue_ReferenceValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_PropertyValue_ReferenceValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PropertyValue_ReferenceValue proto.InternalMessageInfo
func (m *PropertyValue_ReferenceValue) GetApp() string {
if m != nil && m.App != nil {
@@ -771,10 +848,12 @@ func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_Referen
}
type PropertyValue_ReferenceValue_PathElement struct {
- Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"`
- Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"`
- Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"`
+ Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"`
+ Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *PropertyValue_ReferenceValue_PathElement) Reset() {
@@ -783,8 +862,25 @@ func (m *PropertyValue_ReferenceValue_PathElement) Reset() {
func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) }
func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {}
func (*PropertyValue_ReferenceValue_PathElement) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{1, 2, 0}
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2, 0}
}
+func (m *PropertyValue_ReferenceValue_PathElement) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Unmarshal(m, b)
+}
+func (m *PropertyValue_ReferenceValue_PathElement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Marshal(b, m, deterministic)
+}
+func (dst *PropertyValue_ReferenceValue_PathElement) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Merge(dst, src)
+}
+func (m *PropertyValue_ReferenceValue_PathElement) XXX_Size() int {
+ return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Size(m)
+}
+func (m *PropertyValue_ReferenceValue_PathElement) XXX_DiscardUnknown() {
+ xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PropertyValue_ReferenceValue_PathElement proto.InternalMessageInfo
func (m *PropertyValue_ReferenceValue_PathElement) GetType() string {
if m != nil && m.Type != nil {
@@ -816,13 +912,34 @@ type Property struct {
Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"`
FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,json=ftsTokenizationOption,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"`
Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Property) Reset() { *m = Property{} }
-func (m *Property) String() string { return proto.CompactTextString(m) }
-func (*Property) ProtoMessage() {}
-func (*Property) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (m *Property) Reset() { *m = Property{} }
+func (m *Property) String() string { return proto.CompactTextString(m) }
+func (*Property) ProtoMessage() {}
+func (*Property) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2}
+}
+func (m *Property) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Property.Unmarshal(m, b)
+}
+func (m *Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Property.Marshal(b, m, deterministic)
+}
+func (dst *Property) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Property.Merge(dst, src)
+}
+func (m *Property) XXX_Size() int {
+ return xxx_messageInfo_Property.Size(m)
+}
+func (m *Property) XXX_DiscardUnknown() {
+ xxx_messageInfo_Property.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Property proto.InternalMessageInfo
const Default_Property_Meaning Property_Meaning = Property_NO_MEANING
const Default_Property_Searchable bool = false
@@ -885,14 +1002,35 @@ func (m *Property) GetLocale() string {
}
type Path struct {
- Element []*Path_Element `protobuf:"group,1,rep,name=Element,json=element" json:"element,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Element []*Path_Element `protobuf:"group,1,rep,name=Element,json=element" json:"element,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Path) Reset() { *m = Path{} }
-func (m *Path) String() string { return proto.CompactTextString(m) }
-func (*Path) ProtoMessage() {}
-func (*Path) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+func (m *Path) Reset() { *m = Path{} }
+func (m *Path) String() string { return proto.CompactTextString(m) }
+func (*Path) ProtoMessage() {}
+func (*Path) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3}
+}
+func (m *Path) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Path.Unmarshal(m, b)
+}
+func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Path.Marshal(b, m, deterministic)
+}
+func (dst *Path) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Path.Merge(dst, src)
+}
+func (m *Path) XXX_Size() int {
+ return xxx_messageInfo_Path.Size(m)
+}
+func (m *Path) XXX_DiscardUnknown() {
+ xxx_messageInfo_Path.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Path proto.InternalMessageInfo
func (m *Path) GetElement() []*Path_Element {
if m != nil {
@@ -902,16 +1040,37 @@ func (m *Path) GetElement() []*Path_Element {
}
type Path_Element struct {
- Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"`
- Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"`
- Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"`
+ Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"`
+ Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Path_Element) Reset() { *m = Path_Element{} }
-func (m *Path_Element) String() string { return proto.CompactTextString(m) }
-func (*Path_Element) ProtoMessage() {}
-func (*Path_Element) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} }
+func (m *Path_Element) Reset() { *m = Path_Element{} }
+func (m *Path_Element) String() string { return proto.CompactTextString(m) }
+func (*Path_Element) ProtoMessage() {}
+func (*Path_Element) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3, 0}
+}
+func (m *Path_Element) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Path_Element.Unmarshal(m, b)
+}
+func (m *Path_Element) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Path_Element.Marshal(b, m, deterministic)
+}
+func (dst *Path_Element) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Path_Element.Merge(dst, src)
+}
+func (m *Path_Element) XXX_Size() int {
+ return xxx_messageInfo_Path_Element.Size(m)
+}
+func (m *Path_Element) XXX_DiscardUnknown() {
+ xxx_messageInfo_Path_Element.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Path_Element proto.InternalMessageInfo
func (m *Path_Element) GetType() string {
if m != nil && m.Type != nil {
@@ -935,16 +1094,37 @@ func (m *Path_Element) GetName() string {
}
type Reference struct {
- App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
- NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
- Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
+ Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Reference) Reset() { *m = Reference{} }
-func (m *Reference) String() string { return proto.CompactTextString(m) }
-func (*Reference) ProtoMessage() {}
-func (*Reference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+func (m *Reference) Reset() { *m = Reference{} }
+func (m *Reference) String() string { return proto.CompactTextString(m) }
+func (*Reference) ProtoMessage() {}
+func (*Reference) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{4}
+}
+func (m *Reference) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Reference.Unmarshal(m, b)
+}
+func (m *Reference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Reference.Marshal(b, m, deterministic)
+}
+func (dst *Reference) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Reference.Merge(dst, src)
+}
+func (m *Reference) XXX_Size() int {
+ return xxx_messageInfo_Reference.Size(m)
+}
+func (m *Reference) XXX_DiscardUnknown() {
+ xxx_messageInfo_Reference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Reference proto.InternalMessageInfo
func (m *Reference) GetApp() string {
if m != nil && m.App != nil {
@@ -968,18 +1148,39 @@ func (m *Reference) GetPath() *Path {
}
type User struct {
- Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
- AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"`
- Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"`
- FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"`
- FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
+ AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"`
+ Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"`
+ FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *User) Reset() { *m = User{} }
-func (m *User) String() string { return proto.CompactTextString(m) }
-func (*User) ProtoMessage() {}
-func (*User) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+func (m *User) Reset() { *m = User{} }
+func (m *User) String() string { return proto.CompactTextString(m) }
+func (*User) ProtoMessage() {}
+func (*User) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{5}
+}
+func (m *User) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_User.Unmarshal(m, b)
+}
+func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_User.Marshal(b, m, deterministic)
+}
+func (dst *User) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_User.Merge(dst, src)
+}
+func (m *User) XXX_Size() int {
+ return xxx_messageInfo_User.Size(m)
+}
+func (m *User) XXX_DiscardUnknown() {
+ xxx_messageInfo_User.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_User proto.InternalMessageInfo
func (m *User) GetEmail() string {
if m != nil && m.Email != nil {
@@ -1017,21 +1218,42 @@ func (m *User) GetFederatedProvider() string {
}
type EntityProto struct {
- Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"`
- EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group,json=entityGroup" json:"entity_group,omitempty"`
- Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"`
- Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"`
- KindUri *string `protobuf:"bytes,5,opt,name=kind_uri,json=kindUri" json:"kind_uri,omitempty"`
- Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
- RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property,json=rawProperty" json:"raw_property,omitempty"`
- Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"`
+ EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group,json=entityGroup" json:"entity_group,omitempty"`
+ Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"`
+ Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"`
+ KindUri *string `protobuf:"bytes,5,opt,name=kind_uri,json=kindUri" json:"kind_uri,omitempty"`
+ Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+ RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property,json=rawProperty" json:"raw_property,omitempty"`
+ Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *EntityProto) Reset() { *m = EntityProto{} }
-func (m *EntityProto) String() string { return proto.CompactTextString(m) }
-func (*EntityProto) ProtoMessage() {}
-func (*EntityProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (m *EntityProto) Reset() { *m = EntityProto{} }
+func (m *EntityProto) String() string { return proto.CompactTextString(m) }
+func (*EntityProto) ProtoMessage() {}
+func (*EntityProto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6}
+}
+func (m *EntityProto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_EntityProto.Unmarshal(m, b)
+}
+func (m *EntityProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_EntityProto.Marshal(b, m, deterministic)
+}
+func (dst *EntityProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EntityProto.Merge(dst, src)
+}
+func (m *EntityProto) XXX_Size() int {
+ return xxx_messageInfo_EntityProto.Size(m)
+}
+func (m *EntityProto) XXX_DiscardUnknown() {
+ xxx_messageInfo_EntityProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EntityProto proto.InternalMessageInfo
func (m *EntityProto) GetKey() *Reference {
if m != nil {
@@ -1090,15 +1312,36 @@ func (m *EntityProto) GetRank() int32 {
}
type CompositeProperty struct {
- IndexId *int64 `protobuf:"varint,1,req,name=index_id,json=indexId" json:"index_id,omitempty"`
- Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ IndexId *int64 `protobuf:"varint,1,req,name=index_id,json=indexId" json:"index_id,omitempty"`
+ Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *CompositeProperty) Reset() { *m = CompositeProperty{} }
-func (m *CompositeProperty) String() string { return proto.CompactTextString(m) }
-func (*CompositeProperty) ProtoMessage() {}
-func (*CompositeProperty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (m *CompositeProperty) Reset() { *m = CompositeProperty{} }
+func (m *CompositeProperty) String() string { return proto.CompactTextString(m) }
+func (*CompositeProperty) ProtoMessage() {}
+func (*CompositeProperty) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{7}
+}
+func (m *CompositeProperty) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompositeProperty.Unmarshal(m, b)
+}
+func (m *CompositeProperty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompositeProperty.Marshal(b, m, deterministic)
+}
+func (dst *CompositeProperty) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompositeProperty.Merge(dst, src)
+}
+func (m *CompositeProperty) XXX_Size() int {
+ return xxx_messageInfo_CompositeProperty.Size(m)
+}
+func (m *CompositeProperty) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompositeProperty.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompositeProperty proto.InternalMessageInfo
func (m *CompositeProperty) GetIndexId() int64 {
if m != nil && m.IndexId != nil {
@@ -1115,16 +1358,37 @@ func (m *CompositeProperty) GetValue() []string {
}
type Index struct {
- EntityType *string `protobuf:"bytes,1,req,name=entity_type,json=entityType" json:"entity_type,omitempty"`
- Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"`
- Property []*Index_Property `protobuf:"group,2,rep,name=Property,json=property" json:"property,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ EntityType *string `protobuf:"bytes,1,req,name=entity_type,json=entityType" json:"entity_type,omitempty"`
+ Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"`
+ Property []*Index_Property `protobuf:"group,2,rep,name=Property,json=property" json:"property,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Index) Reset() { *m = Index{} }
-func (m *Index) String() string { return proto.CompactTextString(m) }
-func (*Index) ProtoMessage() {}
-func (*Index) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (m *Index) Reset() { *m = Index{} }
+func (m *Index) String() string { return proto.CompactTextString(m) }
+func (*Index) ProtoMessage() {}
+func (*Index) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8}
+}
+func (m *Index) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Index.Unmarshal(m, b)
+}
+func (m *Index) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Index.Marshal(b, m, deterministic)
+}
+func (dst *Index) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Index.Merge(dst, src)
+}
+func (m *Index) XXX_Size() int {
+ return xxx_messageInfo_Index.Size(m)
+}
+func (m *Index) XXX_DiscardUnknown() {
+ xxx_messageInfo_Index.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Index proto.InternalMessageInfo
func (m *Index) GetEntityType() string {
if m != nil && m.EntityType != nil {
@@ -1148,15 +1412,36 @@ func (m *Index) GetProperty() []*Index_Property {
}
type Index_Property struct {
- Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
- Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Index_Property) Reset() { *m = Index_Property{} }
-func (m *Index_Property) String() string { return proto.CompactTextString(m) }
-func (*Index_Property) ProtoMessage() {}
-func (*Index_Property) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8, 0} }
+func (m *Index_Property) Reset() { *m = Index_Property{} }
+func (m *Index_Property) String() string { return proto.CompactTextString(m) }
+func (*Index_Property) ProtoMessage() {}
+func (*Index_Property) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0}
+}
+func (m *Index_Property) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Index_Property.Unmarshal(m, b)
+}
+func (m *Index_Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Index_Property.Marshal(b, m, deterministic)
+}
+func (dst *Index_Property) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Index_Property.Merge(dst, src)
+}
+func (m *Index_Property) XXX_Size() int {
+ return xxx_messageInfo_Index_Property.Size(m)
+}
+func (m *Index_Property) XXX_DiscardUnknown() {
+ xxx_messageInfo_Index_Property.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Index_Property proto.InternalMessageInfo
const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING
@@ -1175,18 +1460,39 @@ func (m *Index_Property) GetDirection() Index_Property_Direction {
}
type CompositeIndex struct {
- AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
- Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
- Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"`
- State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"`
- OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,json=onlyUseIfRequired,def=0" json:"only_use_if_required,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
+ Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
+ Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"`
+ State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"`
+ OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,json=onlyUseIfRequired,def=0" json:"only_use_if_required,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *CompositeIndex) Reset() { *m = CompositeIndex{} }
-func (m *CompositeIndex) String() string { return proto.CompactTextString(m) }
-func (*CompositeIndex) ProtoMessage() {}
-func (*CompositeIndex) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+func (m *CompositeIndex) Reset() { *m = CompositeIndex{} }
+func (m *CompositeIndex) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndex) ProtoMessage() {}
+func (*CompositeIndex) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9}
+}
+func (m *CompositeIndex) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompositeIndex.Unmarshal(m, b)
+}
+func (m *CompositeIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompositeIndex.Marshal(b, m, deterministic)
+}
+func (dst *CompositeIndex) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompositeIndex.Merge(dst, src)
+}
+func (m *CompositeIndex) XXX_Size() int {
+ return xxx_messageInfo_CompositeIndex.Size(m)
+}
+func (m *CompositeIndex) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompositeIndex.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompositeIndex proto.InternalMessageInfo
const Default_CompositeIndex_OnlyUseIfRequired bool = false
@@ -1226,16 +1532,37 @@ func (m *CompositeIndex) GetOnlyUseIfRequired() bool {
}
type IndexPostfix struct {
- IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value,json=indexValue" json:"index_value,omitempty"`
- Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
- Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value,json=indexValue" json:"index_value,omitempty"`
+ Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
+ Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *IndexPostfix) Reset() { *m = IndexPostfix{} }
-func (m *IndexPostfix) String() string { return proto.CompactTextString(m) }
-func (*IndexPostfix) ProtoMessage() {}
-func (*IndexPostfix) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+func (m *IndexPostfix) Reset() { *m = IndexPostfix{} }
+func (m *IndexPostfix) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix) ProtoMessage() {}
+func (*IndexPostfix) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10}
+}
+func (m *IndexPostfix) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_IndexPostfix.Unmarshal(m, b)
+}
+func (m *IndexPostfix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_IndexPostfix.Marshal(b, m, deterministic)
+}
+func (dst *IndexPostfix) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IndexPostfix.Merge(dst, src)
+}
+func (m *IndexPostfix) XXX_Size() int {
+ return xxx_messageInfo_IndexPostfix.Size(m)
+}
+func (m *IndexPostfix) XXX_DiscardUnknown() {
+ xxx_messageInfo_IndexPostfix.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IndexPostfix proto.InternalMessageInfo
const Default_IndexPostfix_Before bool = true
@@ -1261,15 +1588,36 @@ func (m *IndexPostfix) GetBefore() bool {
}
type IndexPostfix_IndexValue struct {
- PropertyName *string `protobuf:"bytes,1,req,name=property_name,json=propertyName" json:"property_name,omitempty"`
- Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ PropertyName *string `protobuf:"bytes,1,req,name=property_name,json=propertyName" json:"property_name,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} }
-func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) }
-func (*IndexPostfix_IndexValue) ProtoMessage() {}
-func (*IndexPostfix_IndexValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 0} }
+func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} }
+func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix_IndexValue) ProtoMessage() {}
+func (*IndexPostfix_IndexValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10, 0}
+}
+func (m *IndexPostfix_IndexValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_IndexPostfix_IndexValue.Unmarshal(m, b)
+}
+func (m *IndexPostfix_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_IndexPostfix_IndexValue.Marshal(b, m, deterministic)
+}
+func (dst *IndexPostfix_IndexValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IndexPostfix_IndexValue.Merge(dst, src)
+}
+func (m *IndexPostfix_IndexValue) XXX_Size() int {
+ return xxx_messageInfo_IndexPostfix_IndexValue.Size(m)
+}
+func (m *IndexPostfix_IndexValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_IndexPostfix_IndexValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IndexPostfix_IndexValue proto.InternalMessageInfo
func (m *IndexPostfix_IndexValue) GetPropertyName() string {
if m != nil && m.PropertyName != nil {
@@ -1286,15 +1634,36 @@ func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue {
}
type IndexPosition struct {
- Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
- Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
+ Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *IndexPosition) Reset() { *m = IndexPosition{} }
-func (m *IndexPosition) String() string { return proto.CompactTextString(m) }
-func (*IndexPosition) ProtoMessage() {}
-func (*IndexPosition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+func (m *IndexPosition) Reset() { *m = IndexPosition{} }
+func (m *IndexPosition) String() string { return proto.CompactTextString(m) }
+func (*IndexPosition) ProtoMessage() {}
+func (*IndexPosition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{11}
+}
+func (m *IndexPosition) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_IndexPosition.Unmarshal(m, b)
+}
+func (m *IndexPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_IndexPosition.Marshal(b, m, deterministic)
+}
+func (dst *IndexPosition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IndexPosition.Merge(dst, src)
+}
+func (m *IndexPosition) XXX_Size() int {
+ return xxx_messageInfo_IndexPosition.Size(m)
+}
+func (m *IndexPosition) XXX_DiscardUnknown() {
+ xxx_messageInfo_IndexPosition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IndexPosition proto.InternalMessageInfo
const Default_IndexPosition_Before bool = true
@@ -1313,14 +1682,35 @@ func (m *IndexPosition) GetBefore() bool {
}
type Snapshot struct {
- Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Snapshot) Reset() { *m = Snapshot{} }
-func (m *Snapshot) String() string { return proto.CompactTextString(m) }
-func (*Snapshot) ProtoMessage() {}
-func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+func (*Snapshot) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12}
+}
+func (m *Snapshot) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Snapshot.Unmarshal(m, b)
+}
+func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
+}
+func (dst *Snapshot) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Snapshot.Merge(dst, src)
+}
+func (m *Snapshot) XXX_Size() int {
+ return xxx_messageInfo_Snapshot.Size(m)
+}
+func (m *Snapshot) XXX_DiscardUnknown() {
+ xxx_messageInfo_Snapshot.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Snapshot proto.InternalMessageInfo
func (m *Snapshot) GetTs() int64 {
if m != nil && m.Ts != nil {
@@ -1330,14 +1720,35 @@ func (m *Snapshot) GetTs() int64 {
}
type InternalHeader struct {
- Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *InternalHeader) Reset() { *m = InternalHeader{} }
-func (m *InternalHeader) String() string { return proto.CompactTextString(m) }
-func (*InternalHeader) ProtoMessage() {}
-func (*InternalHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+func (m *InternalHeader) Reset() { *m = InternalHeader{} }
+func (m *InternalHeader) String() string { return proto.CompactTextString(m) }
+func (*InternalHeader) ProtoMessage() {}
+func (*InternalHeader) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{13}
+}
+func (m *InternalHeader) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_InternalHeader.Unmarshal(m, b)
+}
+func (m *InternalHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_InternalHeader.Marshal(b, m, deterministic)
+}
+func (dst *InternalHeader) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_InternalHeader.Merge(dst, src)
+}
+func (m *InternalHeader) XXX_Size() int {
+ return xxx_messageInfo_InternalHeader.Size(m)
+}
+func (m *InternalHeader) XXX_DiscardUnknown() {
+ xxx_messageInfo_InternalHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InternalHeader proto.InternalMessageInfo
func (m *InternalHeader) GetQos() string {
if m != nil && m.Qos != nil {
@@ -1347,17 +1758,38 @@ func (m *InternalHeader) GetQos() string {
}
type Transaction struct {
- Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
- Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"`
- App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"`
- MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+ Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"`
+ App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"`
+ MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Transaction) Reset() { *m = Transaction{} }
-func (m *Transaction) String() string { return proto.CompactTextString(m) }
-func (*Transaction) ProtoMessage() {}
-func (*Transaction) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+func (m *Transaction) Reset() { *m = Transaction{} }
+func (m *Transaction) String() string { return proto.CompactTextString(m) }
+func (*Transaction) ProtoMessage() {}
+func (*Transaction) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{14}
+}
+func (m *Transaction) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Transaction.Unmarshal(m, b)
+}
+func (m *Transaction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Transaction.Marshal(b, m, deterministic)
+}
+func (dst *Transaction) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Transaction.Merge(dst, src)
+}
+func (m *Transaction) XXX_Size() int {
+ return xxx_messageInfo_Transaction.Size(m)
+}
+func (m *Transaction) XXX_DiscardUnknown() {
+ xxx_messageInfo_Transaction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Transaction proto.InternalMessageInfo
const Default_Transaction_MarkChanges bool = false
@@ -1390,40 +1822,61 @@ func (m *Transaction) GetMarkChanges() bool {
}
type Query struct {
- Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"`
- App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
- NameSpace *string `protobuf:"bytes,29,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
- Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"`
- Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"`
- Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter,json=filter" json:"filter,omitempty"`
- SearchQuery *string `protobuf:"bytes,8,opt,name=search_query,json=searchQuery" json:"search_query,omitempty"`
- Order []*Query_Order `protobuf:"group,9,rep,name=Order,json=order" json:"order,omitempty"`
- Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"`
- Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"`
- Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"`
- Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"`
- CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"`
- EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor,json=endCompiledCursor" json:"end_compiled_cursor,omitempty"`
- CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
- RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,json=requirePerfectPlan,def=0" json:"require_perfect_plan,omitempty"`
- KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,json=keysOnly,def=0" json:"keys_only,omitempty"`
- Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"`
- Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"`
- FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"`
- Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"`
- PropertyName []string `protobuf:"bytes,33,rep,name=property_name,json=propertyName" json:"property_name,omitempty"`
- GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name,json=groupByPropertyName" json:"group_by_property_name,omitempty"`
- Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"`
- MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds,json=minSafeTimeSeconds" json:"min_safe_time_seconds,omitempty"`
- SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name,json=safeReplicaName" json:"safe_replica_name,omitempty"`
- PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,json=persistOffset,def=0" json:"persist_offset,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"`
+ App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,29,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
+ Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"`
+ Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"`
+ Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter,json=filter" json:"filter,omitempty"`
+ SearchQuery *string `protobuf:"bytes,8,opt,name=search_query,json=searchQuery" json:"search_query,omitempty"`
+ Order []*Query_Order `protobuf:"group,9,rep,name=Order,json=order" json:"order,omitempty"`
+ Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"`
+ Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"`
+ Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"`
+ Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"`
+ CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"`
+ EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor,json=endCompiledCursor" json:"end_compiled_cursor,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
+ RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,json=requirePerfectPlan,def=0" json:"require_perfect_plan,omitempty"`
+ KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,json=keysOnly,def=0" json:"keys_only,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"`
+ Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"`
+ FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"`
+ Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"`
+ PropertyName []string `protobuf:"bytes,33,rep,name=property_name,json=propertyName" json:"property_name,omitempty"`
+ GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name,json=groupByPropertyName" json:"group_by_property_name,omitempty"`
+ Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"`
+ MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds,json=minSafeTimeSeconds" json:"min_safe_time_seconds,omitempty"`
+ SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name,json=safeReplicaName" json:"safe_replica_name,omitempty"`
+ PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,json=persistOffset,def=0" json:"persist_offset,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Query) Reset() { *m = Query{} }
-func (m *Query) String() string { return proto.CompactTextString(m) }
-func (*Query) ProtoMessage() {}
-func (*Query) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+func (m *Query) Reset() { *m = Query{} }
+func (m *Query) String() string { return proto.CompactTextString(m) }
+func (*Query) ProtoMessage() {}
+func (*Query) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15}
+}
+func (m *Query) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Query.Unmarshal(m, b)
+}
+func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Query.Marshal(b, m, deterministic)
+}
+func (dst *Query) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Query.Merge(dst, src)
+}
+func (m *Query) XXX_Size() int {
+ return xxx_messageInfo_Query.Size(m)
+}
+func (m *Query) XXX_DiscardUnknown() {
+ xxx_messageInfo_Query.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Query proto.InternalMessageInfo
const Default_Query_Offset int32 = 0
const Default_Query_RequirePerfectPlan bool = false
@@ -1621,15 +2074,36 @@ func (m *Query) GetPersistOffset() bool {
}
type Query_Filter struct {
- Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"`
- Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"`
+ Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Query_Filter) Reset() { *m = Query_Filter{} }
-func (m *Query_Filter) String() string { return proto.CompactTextString(m) }
-func (*Query_Filter) ProtoMessage() {}
-func (*Query_Filter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15, 0} }
+func (m *Query_Filter) Reset() { *m = Query_Filter{} }
+func (m *Query_Filter) String() string { return proto.CompactTextString(m) }
+func (*Query_Filter) ProtoMessage() {}
+func (*Query_Filter) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0}
+}
+func (m *Query_Filter) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Query_Filter.Unmarshal(m, b)
+}
+func (m *Query_Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Query_Filter.Marshal(b, m, deterministic)
+}
+func (dst *Query_Filter) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Query_Filter.Merge(dst, src)
+}
+func (m *Query_Filter) XXX_Size() int {
+ return xxx_messageInfo_Query_Filter.Size(m)
+}
+func (m *Query_Filter) XXX_DiscardUnknown() {
+ xxx_messageInfo_Query_Filter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Query_Filter proto.InternalMessageInfo
func (m *Query_Filter) GetOp() Query_Filter_Operator {
if m != nil && m.Op != nil {
@@ -1646,15 +2120,36 @@ func (m *Query_Filter) GetProperty() []*Property {
}
type Query_Order struct {
- Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"`
- Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"`
+ Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Query_Order) Reset() { *m = Query_Order{} }
-func (m *Query_Order) String() string { return proto.CompactTextString(m) }
-func (*Query_Order) ProtoMessage() {}
-func (*Query_Order) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15, 1} }
+func (m *Query_Order) Reset() { *m = Query_Order{} }
+func (m *Query_Order) String() string { return proto.CompactTextString(m) }
+func (*Query_Order) ProtoMessage() {}
+func (*Query_Order) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1}
+}
+func (m *Query_Order) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Query_Order.Unmarshal(m, b)
+}
+func (m *Query_Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Query_Order.Marshal(b, m, deterministic)
+}
+func (dst *Query_Order) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Query_Order.Merge(dst, src)
+}
+func (m *Query_Order) XXX_Size() int {
+ return xxx_messageInfo_Query_Order.Size(m)
+}
+func (m *Query_Order) XXX_DiscardUnknown() {
+ xxx_messageInfo_Query_Order.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Query_Order proto.InternalMessageInfo
const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING
@@ -1673,22 +2168,43 @@ func (m *Query_Order) GetDirection() Query_Order_Direction {
}
type CompiledQuery struct {
- Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan,json=primaryscan" json:"primaryscan,omitempty"`
- Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan,json=mergejoinscan" json:"mergejoinscan,omitempty"`
- IndexDef *Index `protobuf:"bytes,21,opt,name=index_def,json=indexDef" json:"index_def,omitempty"`
- Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
- Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
- KeysOnly *bool `protobuf:"varint,12,req,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
- PropertyName []string `protobuf:"bytes,24,rep,name=property_name,json=propertyName" json:"property_name,omitempty"`
- DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size,json=distinctInfixSize" json:"distinct_infix_size,omitempty"`
- Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter,json=entityfilter" json:"entityfilter,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan,json=primaryscan" json:"primaryscan,omitempty"`
+ Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan,json=mergejoinscan" json:"mergejoinscan,omitempty"`
+ IndexDef *Index `protobuf:"bytes,21,opt,name=index_def,json=indexDef" json:"index_def,omitempty"`
+ Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
+ Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
+ KeysOnly *bool `protobuf:"varint,12,req,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
+ PropertyName []string `protobuf:"bytes,24,rep,name=property_name,json=propertyName" json:"property_name,omitempty"`
+ DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size,json=distinctInfixSize" json:"distinct_infix_size,omitempty"`
+ Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter,json=entityfilter" json:"entityfilter,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *CompiledQuery) Reset() { *m = CompiledQuery{} }
-func (m *CompiledQuery) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery) ProtoMessage() {}
-func (*CompiledQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+func (m *CompiledQuery) Reset() { *m = CompiledQuery{} }
+func (m *CompiledQuery) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery) ProtoMessage() {}
+func (*CompiledQuery) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16}
+}
+func (m *CompiledQuery) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledQuery.Unmarshal(m, b)
+}
+func (m *CompiledQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledQuery.Marshal(b, m, deterministic)
+}
+func (dst *CompiledQuery) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledQuery.Merge(dst, src)
+}
+func (m *CompiledQuery) XXX_Size() int {
+ return xxx_messageInfo_CompiledQuery.Size(m)
+}
+func (m *CompiledQuery) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledQuery.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledQuery proto.InternalMessageInfo
const Default_CompiledQuery_Offset int32 = 0
@@ -1764,13 +2280,34 @@ type CompiledQuery_PrimaryScan struct {
StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value,json=startPostfixValue" json:"start_postfix_value,omitempty"`
EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value,json=endPostfixValue" json:"end_postfix_value,omitempty"`
EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us,json=endUnappliedLogTimestampUs" json:"end_unapplied_log_timestamp_us,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} }
-func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery_PrimaryScan) ProtoMessage() {}
-func (*CompiledQuery_PrimaryScan) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16, 0} }
+func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} }
+func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_PrimaryScan) ProtoMessage() {}
+func (*CompiledQuery_PrimaryScan) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 0}
+}
+func (m *CompiledQuery_PrimaryScan) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledQuery_PrimaryScan.Unmarshal(m, b)
+}
+func (m *CompiledQuery_PrimaryScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledQuery_PrimaryScan.Marshal(b, m, deterministic)
+}
+func (dst *CompiledQuery_PrimaryScan) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledQuery_PrimaryScan.Merge(dst, src)
+}
+func (m *CompiledQuery_PrimaryScan) XXX_Size() int {
+ return xxx_messageInfo_CompiledQuery_PrimaryScan.Size(m)
+}
+func (m *CompiledQuery_PrimaryScan) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledQuery_PrimaryScan.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledQuery_PrimaryScan proto.InternalMessageInfo
func (m *CompiledQuery_PrimaryScan) GetIndexName() string {
if m != nil && m.IndexName != nil {
@@ -1829,16 +2366,37 @@ func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 {
}
type CompiledQuery_MergeJoinScan struct {
- IndexName *string `protobuf:"bytes,8,req,name=index_name,json=indexName" json:"index_name,omitempty"`
- PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value,json=prefixValue" json:"prefix_value,omitempty"`
- ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,json=valuePrefix,def=0" json:"value_prefix,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ IndexName *string `protobuf:"bytes,8,req,name=index_name,json=indexName" json:"index_name,omitempty"`
+ PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value,json=prefixValue" json:"prefix_value,omitempty"`
+ ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,json=valuePrefix,def=0" json:"value_prefix,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} }
-func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery_MergeJoinScan) ProtoMessage() {}
-func (*CompiledQuery_MergeJoinScan) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16, 1} }
+func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} }
+func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_MergeJoinScan) ProtoMessage() {}
+func (*CompiledQuery_MergeJoinScan) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 1}
+}
+func (m *CompiledQuery_MergeJoinScan) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledQuery_MergeJoinScan.Unmarshal(m, b)
+}
+func (m *CompiledQuery_MergeJoinScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledQuery_MergeJoinScan.Marshal(b, m, deterministic)
+}
+func (dst *CompiledQuery_MergeJoinScan) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledQuery_MergeJoinScan.Merge(dst, src)
+}
+func (m *CompiledQuery_MergeJoinScan) XXX_Size() int {
+ return xxx_messageInfo_CompiledQuery_MergeJoinScan.Size(m)
+}
+func (m *CompiledQuery_MergeJoinScan) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledQuery_MergeJoinScan.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledQuery_MergeJoinScan proto.InternalMessageInfo
const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false
@@ -1864,16 +2422,37 @@ func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool {
}
type CompiledQuery_EntityFilter struct {
- Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"`
- Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"`
- Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"`
+ Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"`
+ Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} }
-func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery_EntityFilter) ProtoMessage() {}
-func (*CompiledQuery_EntityFilter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16, 2} }
+func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} }
+func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_EntityFilter) ProtoMessage() {}
+func (*CompiledQuery_EntityFilter) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 2}
+}
+func (m *CompiledQuery_EntityFilter) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledQuery_EntityFilter.Unmarshal(m, b)
+}
+func (m *CompiledQuery_EntityFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledQuery_EntityFilter.Marshal(b, m, deterministic)
+}
+func (dst *CompiledQuery_EntityFilter) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledQuery_EntityFilter.Merge(dst, src)
+}
+func (m *CompiledQuery_EntityFilter) XXX_Size() int {
+ return xxx_messageInfo_CompiledQuery_EntityFilter.Size(m)
+}
+func (m *CompiledQuery_EntityFilter) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledQuery_EntityFilter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledQuery_EntityFilter proto.InternalMessageInfo
const Default_CompiledQuery_EntityFilter_Distinct bool = false
@@ -1899,14 +2478,35 @@ func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference {
}
type CompiledCursor struct {
- Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position,json=position" json:"position,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position,json=position" json:"position,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *CompiledCursor) Reset() { *m = CompiledCursor{} }
-func (m *CompiledCursor) String() string { return proto.CompactTextString(m) }
-func (*CompiledCursor) ProtoMessage() {}
-func (*CompiledCursor) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+func (m *CompiledCursor) Reset() { *m = CompiledCursor{} }
+func (m *CompiledCursor) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor) ProtoMessage() {}
+func (*CompiledCursor) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17}
+}
+func (m *CompiledCursor) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledCursor.Unmarshal(m, b)
+}
+func (m *CompiledCursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledCursor.Marshal(b, m, deterministic)
+}
+func (dst *CompiledCursor) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledCursor.Merge(dst, src)
+}
+func (m *CompiledCursor) XXX_Size() int {
+ return xxx_messageInfo_CompiledCursor.Size(m)
+}
+func (m *CompiledCursor) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledCursor.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledCursor proto.InternalMessageInfo
func (m *CompiledCursor) GetPosition() *CompiledCursor_Position {
if m != nil {
@@ -1916,17 +2516,38 @@ func (m *CompiledCursor) GetPosition() *CompiledCursor_Position {
}
type CompiledCursor_Position struct {
- StartKey *string `protobuf:"bytes,27,opt,name=start_key,json=startKey" json:"start_key,omitempty"`
- Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue,json=indexvalue" json:"indexvalue,omitempty"`
- Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"`
- StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,json=startInclusive,def=1" json:"start_inclusive,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ StartKey *string `protobuf:"bytes,27,opt,name=start_key,json=startKey" json:"start_key,omitempty"`
+ Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue,json=indexvalue" json:"indexvalue,omitempty"`
+ Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"`
+ StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,json=startInclusive,def=1" json:"start_inclusive,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} }
-func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) }
-func (*CompiledCursor_Position) ProtoMessage() {}
-func (*CompiledCursor_Position) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17, 0} }
+func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} }
+func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position) ProtoMessage() {}
+func (*CompiledCursor_Position) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0}
+}
+func (m *CompiledCursor_Position) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledCursor_Position.Unmarshal(m, b)
+}
+func (m *CompiledCursor_Position) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledCursor_Position.Marshal(b, m, deterministic)
+}
+func (dst *CompiledCursor_Position) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledCursor_Position.Merge(dst, src)
+}
+func (m *CompiledCursor_Position) XXX_Size() int {
+ return xxx_messageInfo_CompiledCursor_Position.Size(m)
+}
+func (m *CompiledCursor_Position) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledCursor_Position.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledCursor_Position proto.InternalMessageInfo
const Default_CompiledCursor_Position_StartInclusive bool = true
@@ -1959,17 +2580,36 @@ func (m *CompiledCursor_Position) GetStartInclusive() bool {
}
type CompiledCursor_Position_IndexValue struct {
- Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"`
- Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} }
func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) }
func (*CompiledCursor_Position_IndexValue) ProtoMessage() {}
func (*CompiledCursor_Position_IndexValue) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{17, 0, 0}
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0, 0}
}
+func (m *CompiledCursor_Position_IndexValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledCursor_Position_IndexValue.Unmarshal(m, b)
+}
+func (m *CompiledCursor_Position_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledCursor_Position_IndexValue.Marshal(b, m, deterministic)
+}
+func (dst *CompiledCursor_Position_IndexValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledCursor_Position_IndexValue.Merge(dst, src)
+}
+func (m *CompiledCursor_Position_IndexValue) XXX_Size() int {
+ return xxx_messageInfo_CompiledCursor_Position_IndexValue.Size(m)
+}
+func (m *CompiledCursor_Position_IndexValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledCursor_Position_IndexValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledCursor_Position_IndexValue proto.InternalMessageInfo
func (m *CompiledCursor_Position_IndexValue) GetProperty() string {
if m != nil && m.Property != nil {
@@ -1986,15 +2626,36 @@ func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue {
}
type Cursor struct {
- Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"`
- App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"`
+ App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Cursor) Reset() { *m = Cursor{} }
-func (m *Cursor) String() string { return proto.CompactTextString(m) }
-func (*Cursor) ProtoMessage() {}
-func (*Cursor) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+func (m *Cursor) Reset() { *m = Cursor{} }
+func (m *Cursor) String() string { return proto.CompactTextString(m) }
+func (*Cursor) ProtoMessage() {}
+func (*Cursor) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{18}
+}
+func (m *Cursor) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Cursor.Unmarshal(m, b)
+}
+func (m *Cursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Cursor.Marshal(b, m, deterministic)
+}
+func (dst *Cursor) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Cursor.Merge(dst, src)
+}
+func (m *Cursor) XXX_Size() int {
+ return xxx_messageInfo_Cursor.Size(m)
+}
+func (m *Cursor) XXX_DiscardUnknown() {
+ xxx_messageInfo_Cursor.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Cursor proto.InternalMessageInfo
func (m *Cursor) GetCursor() uint64 {
if m != nil && m.Cursor != nil {
@@ -2011,13 +2672,34 @@ func (m *Cursor) GetApp() string {
}
type Error struct {
- XXX_unrecognized []byte `json:"-"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Error) Reset() { *m = Error{} }
-func (m *Error) String() string { return proto.CompactTextString(m) }
-func (*Error) ProtoMessage() {}
-func (*Error) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+func (m *Error) Reset() { *m = Error{} }
+func (m *Error) String() string { return proto.CompactTextString(m) }
+func (*Error) ProtoMessage() {}
+func (*Error) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19}
+}
+func (m *Error) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Error.Unmarshal(m, b)
+}
+func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Error.Marshal(b, m, deterministic)
+}
+func (dst *Error) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Error.Merge(dst, src)
+}
+func (m *Error) XXX_Size() int {
+ return xxx_messageInfo_Error.Size(m)
+}
+func (m *Error) XXX_DiscardUnknown() {
+ xxx_messageInfo_Error.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Error proto.InternalMessageInfo
type Cost struct {
IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes,json=indexWrites" json:"index_writes,omitempty"`
@@ -2027,13 +2709,34 @@ type Cost struct {
Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost,json=commitcost" json:"commitcost,omitempty"`
ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta,json=approximateStorageDelta" json:"approximate_storage_delta,omitempty"`
IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates,json=idSequenceUpdates" json:"id_sequence_updates,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Cost) Reset() { *m = Cost{} }
-func (m *Cost) String() string { return proto.CompactTextString(m) }
-func (*Cost) ProtoMessage() {}
-func (*Cost) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+func (m *Cost) Reset() { *m = Cost{} }
+func (m *Cost) String() string { return proto.CompactTextString(m) }
+func (*Cost) ProtoMessage() {}
+func (*Cost) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20}
+}
+func (m *Cost) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Cost.Unmarshal(m, b)
+}
+func (m *Cost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Cost.Marshal(b, m, deterministic)
+}
+func (dst *Cost) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Cost.Merge(dst, src)
+}
+func (m *Cost) XXX_Size() int {
+ return xxx_messageInfo_Cost.Size(m)
+}
+func (m *Cost) XXX_DiscardUnknown() {
+ xxx_messageInfo_Cost.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Cost proto.InternalMessageInfo
func (m *Cost) GetIndexWrites() int32 {
if m != nil && m.IndexWrites != nil {
@@ -2085,15 +2788,36 @@ func (m *Cost) GetIdSequenceUpdates() int32 {
}
type Cost_CommitCost struct {
- RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts,json=requestedEntityPuts" json:"requested_entity_puts,omitempty"`
- RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes,json=requestedEntityDeletes" json:"requested_entity_deletes,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts,json=requestedEntityPuts" json:"requested_entity_puts,omitempty"`
+ RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes,json=requestedEntityDeletes" json:"requested_entity_deletes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} }
-func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) }
-func (*Cost_CommitCost) ProtoMessage() {}
-func (*Cost_CommitCost) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20, 0} }
+func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} }
+func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) }
+func (*Cost_CommitCost) ProtoMessage() {}
+func (*Cost_CommitCost) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20, 0}
+}
+func (m *Cost_CommitCost) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Cost_CommitCost.Unmarshal(m, b)
+}
+func (m *Cost_CommitCost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Cost_CommitCost.Marshal(b, m, deterministic)
+}
+func (dst *Cost_CommitCost) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Cost_CommitCost.Merge(dst, src)
+}
+func (m *Cost_CommitCost) XXX_Size() int {
+ return xxx_messageInfo_Cost_CommitCost.Size(m)
+}
+func (m *Cost_CommitCost) XXX_DiscardUnknown() {
+ xxx_messageInfo_Cost_CommitCost.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Cost_CommitCost proto.InternalMessageInfo
func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 {
if m != nil && m.RequestedEntityPuts != nil {
@@ -2110,19 +2834,40 @@ func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 {
}
type GetRequest struct {
- Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"`
- Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
- Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
- FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"`
- Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"`
- AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,json=allowDeferred,def=0" json:"allow_deferred,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+ FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"`
+ Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"`
+ AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,json=allowDeferred,def=0" json:"allow_deferred,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *GetRequest) Reset() { *m = GetRequest{} }
-func (m *GetRequest) String() string { return proto.CompactTextString(m) }
-func (*GetRequest) ProtoMessage() {}
-func (*GetRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
+func (m *GetRequest) Reset() { *m = GetRequest{} }
+func (m *GetRequest) String() string { return proto.CompactTextString(m) }
+func (*GetRequest) ProtoMessage() {}
+func (*GetRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{21}
+}
+func (m *GetRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetRequest.Unmarshal(m, b)
+}
+func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic)
+}
+func (dst *GetRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetRequest.Merge(dst, src)
+}
+func (m *GetRequest) XXX_Size() int {
+ return xxx_messageInfo_GetRequest.Size(m)
+}
+func (m *GetRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetRequest proto.InternalMessageInfo
const Default_GetRequest_AllowDeferred bool = false
@@ -2169,16 +2914,37 @@ func (m *GetRequest) GetAllowDeferred() bool {
}
type GetResponse struct {
- Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity,json=entity" json:"entity,omitempty"`
- Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"`
- InOrder *bool `protobuf:"varint,6,opt,name=in_order,json=inOrder,def=1" json:"in_order,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity,json=entity" json:"entity,omitempty"`
+ Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"`
+ InOrder *bool `protobuf:"varint,6,opt,name=in_order,json=inOrder,def=1" json:"in_order,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *GetResponse) Reset() { *m = GetResponse{} }
-func (m *GetResponse) String() string { return proto.CompactTextString(m) }
-func (*GetResponse) ProtoMessage() {}
-func (*GetResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+func (m *GetResponse) Reset() { *m = GetResponse{} }
+func (m *GetResponse) String() string { return proto.CompactTextString(m) }
+func (*GetResponse) ProtoMessage() {}
+func (*GetResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22}
+}
+func (m *GetResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetResponse.Unmarshal(m, b)
+}
+func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic)
+}
+func (dst *GetResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetResponse.Merge(dst, src)
+}
+func (m *GetResponse) XXX_Size() int {
+ return xxx_messageInfo_GetResponse.Size(m)
+}
+func (m *GetResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetResponse proto.InternalMessageInfo
const Default_GetResponse_InOrder bool = true
@@ -2204,16 +2970,37 @@ func (m *GetResponse) GetInOrder() bool {
}
type GetResponse_Entity struct {
- Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"`
- Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"`
- Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"`
+ Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"`
+ Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} }
-func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) }
-func (*GetResponse_Entity) ProtoMessage() {}
-func (*GetResponse_Entity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22, 0} }
+func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} }
+func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) }
+func (*GetResponse_Entity) ProtoMessage() {}
+func (*GetResponse_Entity) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22, 0}
+}
+func (m *GetResponse_Entity) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetResponse_Entity.Unmarshal(m, b)
+}
+func (m *GetResponse_Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetResponse_Entity.Marshal(b, m, deterministic)
+}
+func (dst *GetResponse_Entity) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetResponse_Entity.Merge(dst, src)
+}
+func (m *GetResponse_Entity) XXX_Size() int {
+ return xxx_messageInfo_GetResponse_Entity.Size(m)
+}
+func (m *GetResponse_Entity) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetResponse_Entity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetResponse_Entity proto.InternalMessageInfo
func (m *GetResponse_Entity) GetEntity() *EntityProto {
if m != nil {
@@ -2237,22 +3024,43 @@ func (m *GetResponse_Entity) GetVersion() int64 {
}
type PutRequest struct {
- Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"`
- Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"`
- Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
- CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
- Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
- Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
- MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
- Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
- AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,json=autoIdPolicy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"`
+ Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
+ Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+ Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+ MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,json=autoIdPolicy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *PutRequest) Reset() { *m = PutRequest{} }
-func (m *PutRequest) String() string { return proto.CompactTextString(m) }
-func (*PutRequest) ProtoMessage() {}
-func (*PutRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+func (m *PutRequest) Reset() { *m = PutRequest{} }
+func (m *PutRequest) String() string { return proto.CompactTextString(m) }
+func (*PutRequest) ProtoMessage() {}
+func (*PutRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23}
+}
+func (m *PutRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PutRequest.Unmarshal(m, b)
+}
+func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic)
+}
+func (dst *PutRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PutRequest.Merge(dst, src)
+}
+func (m *PutRequest) XXX_Size() int {
+ return xxx_messageInfo_PutRequest.Size(m)
+}
+func (m *PutRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_PutRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PutRequest proto.InternalMessageInfo
const Default_PutRequest_Trusted bool = false
const Default_PutRequest_Force bool = false
@@ -2323,16 +3131,37 @@ func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy {
}
type PutResponse struct {
- Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
- Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"`
- Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"`
+ Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *PutResponse) Reset() { *m = PutResponse{} }
-func (m *PutResponse) String() string { return proto.CompactTextString(m) }
-func (*PutResponse) ProtoMessage() {}
-func (*PutResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+func (m *PutResponse) Reset() { *m = PutResponse{} }
+func (m *PutResponse) String() string { return proto.CompactTextString(m) }
+func (*PutResponse) ProtoMessage() {}
+func (*PutResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{24}
+}
+func (m *PutResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PutResponse.Unmarshal(m, b)
+}
+func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic)
+}
+func (dst *PutResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PutResponse.Merge(dst, src)
+}
+func (m *PutResponse) XXX_Size() int {
+ return xxx_messageInfo_PutResponse.Size(m)
+}
+func (m *PutResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_PutResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PutResponse proto.InternalMessageInfo
func (m *PutResponse) GetKey() []*Reference {
if m != nil {
@@ -2356,18 +3185,39 @@ func (m *PutResponse) GetVersion() []int64 {
}
type TouchRequest struct {
- Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
- Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
- CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
- Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"`
- Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
+ Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *TouchRequest) Reset() { *m = TouchRequest{} }
-func (m *TouchRequest) String() string { return proto.CompactTextString(m) }
-func (*TouchRequest) ProtoMessage() {}
-func (*TouchRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
+func (m *TouchRequest) Reset() { *m = TouchRequest{} }
+func (m *TouchRequest) String() string { return proto.CompactTextString(m) }
+func (*TouchRequest) ProtoMessage() {}
+func (*TouchRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{25}
+}
+func (m *TouchRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TouchRequest.Unmarshal(m, b)
+}
+func (m *TouchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TouchRequest.Marshal(b, m, deterministic)
+}
+func (dst *TouchRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TouchRequest.Merge(dst, src)
+}
+func (m *TouchRequest) XXX_Size() int {
+ return xxx_messageInfo_TouchRequest.Size(m)
+}
+func (m *TouchRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_TouchRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TouchRequest proto.InternalMessageInfo
const Default_TouchRequest_Force bool = false
@@ -2407,14 +3257,35 @@ func (m *TouchRequest) GetSnapshot() []*Snapshot {
}
type TouchResponse struct {
- Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *TouchResponse) Reset() { *m = TouchResponse{} }
-func (m *TouchResponse) String() string { return proto.CompactTextString(m) }
-func (*TouchResponse) ProtoMessage() {}
-func (*TouchResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
+func (m *TouchResponse) Reset() { *m = TouchResponse{} }
+func (m *TouchResponse) String() string { return proto.CompactTextString(m) }
+func (*TouchResponse) ProtoMessage() {}
+func (*TouchResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{26}
+}
+func (m *TouchResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TouchResponse.Unmarshal(m, b)
+}
+func (m *TouchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TouchResponse.Marshal(b, m, deterministic)
+}
+func (dst *TouchResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TouchResponse.Merge(dst, src)
+}
+func (m *TouchResponse) XXX_Size() int {
+ return xxx_messageInfo_TouchResponse.Size(m)
+}
+func (m *TouchResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_TouchResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TouchResponse proto.InternalMessageInfo
func (m *TouchResponse) GetCost() *Cost {
if m != nil {
@@ -2424,20 +3295,41 @@ func (m *TouchResponse) GetCost() *Cost {
}
type DeleteRequest struct {
- Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
- Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"`
- Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"`
- Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
- Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
- MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
- Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"`
+ Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+ Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+ MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
-func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*DeleteRequest) ProtoMessage() {}
-func (*DeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
+func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
+func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteRequest) ProtoMessage() {}
+func (*DeleteRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{27}
+}
+func (m *DeleteRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DeleteRequest.Unmarshal(m, b)
+}
+func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic)
+}
+func (dst *DeleteRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeleteRequest.Merge(dst, src)
+}
+func (m *DeleteRequest) XXX_Size() int {
+ return xxx_messageInfo_DeleteRequest.Size(m)
+}
+func (m *DeleteRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeleteRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo
const Default_DeleteRequest_Trusted bool = false
const Default_DeleteRequest_Force bool = false
@@ -2493,15 +3385,36 @@ func (m *DeleteRequest) GetSnapshot() []*Snapshot {
}
type DeleteResponse struct {
- Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
- Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *DeleteResponse) Reset() { *m = DeleteResponse{} }
-func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*DeleteResponse) ProtoMessage() {}
-func (*DeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
+func (m *DeleteResponse) Reset() { *m = DeleteResponse{} }
+func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteResponse) ProtoMessage() {}
+func (*DeleteResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{28}
+}
+func (m *DeleteResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DeleteResponse.Unmarshal(m, b)
+}
+func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic)
+}
+func (dst *DeleteResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeleteResponse.Merge(dst, src)
+}
+func (m *DeleteResponse) XXX_Size() int {
+ return xxx_messageInfo_DeleteResponse.Size(m)
+}
+func (m *DeleteResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeleteResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo
func (m *DeleteResponse) GetCost() *Cost {
if m != nil {
@@ -2518,18 +3431,39 @@ func (m *DeleteResponse) GetVersion() []int64 {
}
type NextRequest struct {
- Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"`
- Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"`
- Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
- Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"`
- Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"`
+ Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"`
+ Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
+ Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"`
+ Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *NextRequest) Reset() { *m = NextRequest{} }
-func (m *NextRequest) String() string { return proto.CompactTextString(m) }
-func (*NextRequest) ProtoMessage() {}
-func (*NextRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
+func (m *NextRequest) Reset() { *m = NextRequest{} }
+func (m *NextRequest) String() string { return proto.CompactTextString(m) }
+func (*NextRequest) ProtoMessage() {}
+func (*NextRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{29}
+}
+func (m *NextRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_NextRequest.Unmarshal(m, b)
+}
+func (m *NextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_NextRequest.Marshal(b, m, deterministic)
+}
+func (dst *NextRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NextRequest.Merge(dst, src)
+}
+func (m *NextRequest) XXX_Size() int {
+ return xxx_messageInfo_NextRequest.Size(m)
+}
+func (m *NextRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_NextRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NextRequest proto.InternalMessageInfo
const Default_NextRequest_Offset int32 = 0
const Default_NextRequest_Compile bool = false
@@ -2570,24 +3504,45 @@ func (m *NextRequest) GetCompile() bool {
}
type QueryResult struct {
- Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"`
- Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"`
- SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results,json=skippedResults" json:"skipped_results,omitempty"`
- MoreResults *bool `protobuf:"varint,3,req,name=more_results,json=moreResults" json:"more_results,omitempty"`
- KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
- IndexOnly *bool `protobuf:"varint,9,opt,name=index_only,json=indexOnly" json:"index_only,omitempty"`
- SmallOps *bool `protobuf:"varint,10,opt,name=small_ops,json=smallOps" json:"small_ops,omitempty"`
- CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query,json=compiledQuery" json:"compiled_query,omitempty"`
- CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"`
- Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"`
- Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"`
+ Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"`
+ SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results,json=skippedResults" json:"skipped_results,omitempty"`
+ MoreResults *bool `protobuf:"varint,3,req,name=more_results,json=moreResults" json:"more_results,omitempty"`
+ KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
+ IndexOnly *bool `protobuf:"varint,9,opt,name=index_only,json=indexOnly" json:"index_only,omitempty"`
+ SmallOps *bool `protobuf:"varint,10,opt,name=small_ops,json=smallOps" json:"small_ops,omitempty"`
+ CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query,json=compiledQuery" json:"compiled_query,omitempty"`
+ CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"`
+ Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"`
+ Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *QueryResult) Reset() { *m = QueryResult{} }
-func (m *QueryResult) String() string { return proto.CompactTextString(m) }
-func (*QueryResult) ProtoMessage() {}
-func (*QueryResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
+func (m *QueryResult) Reset() { *m = QueryResult{} }
+func (m *QueryResult) String() string { return proto.CompactTextString(m) }
+func (*QueryResult) ProtoMessage() {}
+func (*QueryResult) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{30}
+}
+func (m *QueryResult) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_QueryResult.Unmarshal(m, b)
+}
+func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic)
+}
+func (dst *QueryResult) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryResult.Merge(dst, src)
+}
+func (m *QueryResult) XXX_Size() int {
+ return xxx_messageInfo_QueryResult.Size(m)
+}
+func (m *QueryResult) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryResult.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryResult proto.InternalMessageInfo
func (m *QueryResult) GetCursor() *Cursor {
if m != nil {
@@ -2667,18 +3622,39 @@ func (m *QueryResult) GetVersion() []int64 {
}
type AllocateIdsRequest struct {
- Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
- ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key,json=modelKey" json:"model_key,omitempty"`
- Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
- Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"`
- Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+ ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key,json=modelKey" json:"model_key,omitempty"`
+ Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
+ Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"`
+ Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} }
-func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
-func (*AllocateIdsRequest) ProtoMessage() {}
-func (*AllocateIdsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
+func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} }
+func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsRequest) ProtoMessage() {}
+func (*AllocateIdsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{31}
+}
+func (m *AllocateIdsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AllocateIdsRequest.Unmarshal(m, b)
+}
+func (m *AllocateIdsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AllocateIdsRequest.Marshal(b, m, deterministic)
+}
+func (dst *AllocateIdsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AllocateIdsRequest.Merge(dst, src)
+}
+func (m *AllocateIdsRequest) XXX_Size() int {
+ return xxx_messageInfo_AllocateIdsRequest.Size(m)
+}
+func (m *AllocateIdsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AllocateIdsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AllocateIdsRequest proto.InternalMessageInfo
func (m *AllocateIdsRequest) GetHeader() *InternalHeader {
if m != nil {
@@ -2716,16 +3692,37 @@ func (m *AllocateIdsRequest) GetReserve() []*Reference {
}
type AllocateIdsResponse struct {
- Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"`
- End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"`
- Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"`
+ End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"`
+ Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} }
-func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
-func (*AllocateIdsResponse) ProtoMessage() {}
-func (*AllocateIdsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
+func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} }
+func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsResponse) ProtoMessage() {}
+func (*AllocateIdsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{32}
+}
+func (m *AllocateIdsResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AllocateIdsResponse.Unmarshal(m, b)
+}
+func (m *AllocateIdsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AllocateIdsResponse.Marshal(b, m, deterministic)
+}
+func (dst *AllocateIdsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AllocateIdsResponse.Merge(dst, src)
+}
+func (m *AllocateIdsResponse) XXX_Size() int {
+ return xxx_messageInfo_AllocateIdsResponse.Size(m)
+}
+func (m *AllocateIdsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AllocateIdsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AllocateIdsResponse proto.InternalMessageInfo
func (m *AllocateIdsResponse) GetStart() int64 {
if m != nil && m.Start != nil {
@@ -2749,14 +3746,35 @@ func (m *AllocateIdsResponse) GetCost() *Cost {
}
type CompositeIndices struct {
- Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *CompositeIndices) Reset() { *m = CompositeIndices{} }
-func (m *CompositeIndices) String() string { return proto.CompactTextString(m) }
-func (*CompositeIndices) ProtoMessage() {}
-func (*CompositeIndices) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} }
+func (m *CompositeIndices) Reset() { *m = CompositeIndices{} }
+func (m *CompositeIndices) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndices) ProtoMessage() {}
+func (*CompositeIndices) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{33}
+}
+func (m *CompositeIndices) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompositeIndices.Unmarshal(m, b)
+}
+func (m *CompositeIndices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompositeIndices.Marshal(b, m, deterministic)
+}
+func (dst *CompositeIndices) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompositeIndices.Merge(dst, src)
+}
+func (m *CompositeIndices) XXX_Size() int {
+ return xxx_messageInfo_CompositeIndices.Size(m)
+}
+func (m *CompositeIndices) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompositeIndices.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompositeIndices proto.InternalMessageInfo
func (m *CompositeIndices) GetIndex() []*CompositeIndex {
if m != nil {
@@ -2766,16 +3784,37 @@ func (m *CompositeIndices) GetIndex() []*CompositeIndex {
}
type AddActionsRequest struct {
- Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
- Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
- Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
+ Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} }
-func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) }
-func (*AddActionsRequest) ProtoMessage() {}
-func (*AddActionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} }
+func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} }
+func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) }
+func (*AddActionsRequest) ProtoMessage() {}
+func (*AddActionsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{34}
+}
+func (m *AddActionsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AddActionsRequest.Unmarshal(m, b)
+}
+func (m *AddActionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AddActionsRequest.Marshal(b, m, deterministic)
+}
+func (dst *AddActionsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AddActionsRequest.Merge(dst, src)
+}
+func (m *AddActionsRequest) XXX_Size() int {
+ return xxx_messageInfo_AddActionsRequest.Size(m)
+}
+func (m *AddActionsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AddActionsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AddActionsRequest proto.InternalMessageInfo
func (m *AddActionsRequest) GetHeader() *InternalHeader {
if m != nil {
@@ -2799,28 +3838,70 @@ func (m *AddActionsRequest) GetAction() []*Action {
}
type AddActionsResponse struct {
- XXX_unrecognized []byte `json:"-"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} }
-func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) }
-func (*AddActionsResponse) ProtoMessage() {}
-func (*AddActionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} }
+func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} }
+func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) }
+func (*AddActionsResponse) ProtoMessage() {}
+func (*AddActionsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{35}
+}
+func (m *AddActionsResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AddActionsResponse.Unmarshal(m, b)
+}
+func (m *AddActionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AddActionsResponse.Marshal(b, m, deterministic)
+}
+func (dst *AddActionsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AddActionsResponse.Merge(dst, src)
+}
+func (m *AddActionsResponse) XXX_Size() int {
+ return xxx_messageInfo_AddActionsResponse.Size(m)
+}
+func (m *AddActionsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AddActionsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AddActionsResponse proto.InternalMessageInfo
type BeginTransactionRequest struct {
- Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
- App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
- AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,json=allowMultipleEg,def=0" json:"allow_multiple_eg,omitempty"`
- DatabaseId *string `protobuf:"bytes,4,opt,name=database_id,json=databaseId" json:"database_id,omitempty"`
- Mode *BeginTransactionRequest_TransactionMode `protobuf:"varint,5,opt,name=mode,enum=appengine.BeginTransactionRequest_TransactionMode,def=0" json:"mode,omitempty"`
- PreviousTransaction *Transaction `protobuf:"bytes,7,opt,name=previous_transaction,json=previousTransaction" json:"previous_transaction,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+ App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+ AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,json=allowMultipleEg,def=0" json:"allow_multiple_eg,omitempty"`
+ DatabaseId *string `protobuf:"bytes,4,opt,name=database_id,json=databaseId" json:"database_id,omitempty"`
+ Mode *BeginTransactionRequest_TransactionMode `protobuf:"varint,5,opt,name=mode,enum=appengine.BeginTransactionRequest_TransactionMode,def=0" json:"mode,omitempty"`
+ PreviousTransaction *Transaction `protobuf:"bytes,7,opt,name=previous_transaction,json=previousTransaction" json:"previous_transaction,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} }
-func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
-func (*BeginTransactionRequest) ProtoMessage() {}
-func (*BeginTransactionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} }
+func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} }
+func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
+func (*BeginTransactionRequest) ProtoMessage() {}
+func (*BeginTransactionRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36}
+}
+func (m *BeginTransactionRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_BeginTransactionRequest.Unmarshal(m, b)
+}
+func (m *BeginTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_BeginTransactionRequest.Marshal(b, m, deterministic)
+}
+func (dst *BeginTransactionRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BeginTransactionRequest.Merge(dst, src)
+}
+func (m *BeginTransactionRequest) XXX_Size() int {
+ return xxx_messageInfo_BeginTransactionRequest.Size(m)
+}
+func (m *BeginTransactionRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_BeginTransactionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BeginTransactionRequest proto.InternalMessageInfo
const Default_BeginTransactionRequest_AllowMultipleEg bool = false
const Default_BeginTransactionRequest_Mode BeginTransactionRequest_TransactionMode = BeginTransactionRequest_UNKNOWN
@@ -2868,15 +3949,36 @@ func (m *BeginTransactionRequest) GetPreviousTransaction() *Transaction {
}
type CommitResponse struct {
- Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
- Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version,json=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version,json=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *CommitResponse) Reset() { *m = CommitResponse{} }
-func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
-func (*CommitResponse) ProtoMessage() {}
-func (*CommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} }
+func (m *CommitResponse) Reset() { *m = CommitResponse{} }
+func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse) ProtoMessage() {}
+func (*CommitResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37}
+}
+func (m *CommitResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CommitResponse.Unmarshal(m, b)
+}
+func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic)
+}
+func (dst *CommitResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CommitResponse.Merge(dst, src)
+}
+func (m *CommitResponse) XXX_Size() int {
+ return xxx_messageInfo_CommitResponse.Size(m)
+}
+func (m *CommitResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_CommitResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CommitResponse proto.InternalMessageInfo
func (m *CommitResponse) GetCost() *Cost {
if m != nil {
@@ -2893,15 +3995,36 @@ func (m *CommitResponse) GetVersion() []*CommitResponse_Version {
}
type CommitResponse_Version struct {
- RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key,json=rootEntityKey" json:"root_entity_key,omitempty"`
- Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key,json=rootEntityKey" json:"root_entity_key,omitempty"`
+ Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} }
-func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) }
-func (*CommitResponse_Version) ProtoMessage() {}
-func (*CommitResponse_Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37, 0} }
+func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} }
+func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse_Version) ProtoMessage() {}
+func (*CommitResponse_Version) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37, 0}
+}
+func (m *CommitResponse_Version) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CommitResponse_Version.Unmarshal(m, b)
+}
+func (m *CommitResponse_Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CommitResponse_Version.Marshal(b, m, deterministic)
+}
+func (dst *CommitResponse_Version) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CommitResponse_Version.Merge(dst, src)
+}
+func (m *CommitResponse_Version) XXX_Size() int {
+ return xxx_messageInfo_CommitResponse_Version.Size(m)
+}
+func (m *CommitResponse_Version) XXX_DiscardUnknown() {
+ xxx_messageInfo_CommitResponse_Version.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CommitResponse_Version proto.InternalMessageInfo
func (m *CommitResponse_Version) GetRootEntityKey() *Reference {
if m != nil {
@@ -2976,10 +4099,10 @@ func init() {
}
func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/datastore/datastore_v3.proto", fileDescriptor0)
+ proto.RegisterFile("google.golang.org/appengine/internal/datastore/datastore_v3.proto", fileDescriptor_datastore_v3_83b17b80c34f6179)
}
-var fileDescriptor0 = []byte{
+var fileDescriptor_datastore_v3_83b17b80c34f6179 = []byte{
// 4156 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0xe3, 0x46,
0x76, 0x37, 0xc1, 0xef, 0x47, 0x89, 0x82, 0x5a, 0xf3, 0xc1, 0xa1, 0x3f, 0x46, 0xc6, 0xac, 0x6d,
diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go
index d5fa75be7..5d8067263 100644
--- a/vendor/google.golang.org/appengine/internal/identity_vm.go
+++ b/vendor/google.golang.org/appengine/internal/identity_vm.go
@@ -7,8 +7,10 @@
package internal
import (
+ "log"
"net/http"
"os"
+ "strings"
netcontext "golang.org/x/net/context"
)
@@ -39,7 +41,21 @@ func RequestID(ctx netcontext.Context) string {
}
func Datacenter(ctx netcontext.Context) string {
- return ctxHeaders(ctx).Get(hDatacenter)
+ if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" {
+ return dc
+ }
+ // If the header isn't set, read zone from the metadata service.
+ // It has the format projects/[NUMERIC_PROJECT_ID]/zones/[ZONE]
+ zone, err := getMetadata("instance/zone")
+ if err != nil {
+ log.Printf("Datacenter: %v", err)
+ return ""
+ }
+ parts := strings.Split(string(zone), "/")
+ if len(parts) == 0 {
+ return ""
+ }
+ return parts[len(parts)-1]
}
func ServerSoftware() string {
@@ -47,6 +63,9 @@ func ServerSoftware() string {
if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
return s
}
+ if s := os.Getenv("GAE_ENV"); s != "" {
+ return s
+ }
return "Google App Engine/1.x.x"
}
@@ -56,6 +75,9 @@ func ModuleName(_ netcontext.Context) string {
if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
return s
}
+ if s := os.Getenv("GAE_SERVICE"); s != "" {
+ return s
+ }
return string(mustGetMetadata("instance/attributes/gae_backend_name"))
}
@@ -63,6 +85,9 @@ func VersionID(_ netcontext.Context) string {
if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
return s1 + "." + s2
}
+ if s1, s2 := os.Getenv("GAE_VERSION"), os.Getenv("GAE_DEPLOYMENT_ID"); s1 != "" && s2 != "" {
+ return s1 + "." + s2
+ }
return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
}
@@ -70,19 +95,27 @@ func InstanceID() string {
if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
return s
}
+ if s := os.Getenv("GAE_INSTANCE"); s != "" {
+ return s
+ }
return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
}
func partitionlessAppID() string {
// gae_project has everything except the partition prefix.
- appID := os.Getenv("GAE_LONG_APP_ID")
- if appID == "" {
- appID = string(mustGetMetadata("instance/attributes/gae_project"))
+ if appID := os.Getenv("GAE_LONG_APP_ID"); appID != "" {
+ return appID
}
- return appID
+ if project := os.Getenv("GOOGLE_CLOUD_PROJECT"); project != "" {
+ return project
+ }
+ return string(mustGetMetadata("instance/attributes/gae_project"))
}
func fullyQualifiedAppID(_ netcontext.Context) string {
+ if s := os.Getenv("GAE_APPLICATION"); s != "" {
+ return s
+ }
appID := partitionlessAppID()
part := os.Getenv("GAE_PARTITION")
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
index 5549605ad..8545ac4ad 100644
--- a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
@@ -1,28 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google.golang.org/appengine/internal/log/log_service.proto
-/*
-Package log is a generated protocol buffer package.
-
-It is generated from these files:
- google.golang.org/appengine/internal/log/log_service.proto
-
-It has these top-level messages:
- LogServiceError
- UserAppLogLine
- UserAppLogGroup
- FlushRequest
- SetStatusRequest
- LogOffset
- LogLine
- RequestLog
- LogModuleVersion
- LogReadRequest
- LogReadResponse
- LogUsageRecord
- LogUsageRequest
- LogUsageResponse
-*/
package log
import proto "github.com/golang/protobuf/proto"
@@ -75,28 +53,72 @@ func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
*x = LogServiceError_ErrorCode(value)
return nil
}
-func (LogServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
+func (LogServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{0, 0}
+}
type LogServiceError struct {
- XXX_unrecognized []byte `json:"-"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *LogServiceError) Reset() { *m = LogServiceError{} }
-func (m *LogServiceError) String() string { return proto.CompactTextString(m) }
-func (*LogServiceError) ProtoMessage() {}
-func (*LogServiceError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (m *LogServiceError) Reset() { *m = LogServiceError{} }
+func (m *LogServiceError) String() string { return proto.CompactTextString(m) }
+func (*LogServiceError) ProtoMessage() {}
+func (*LogServiceError) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{0}
+}
+func (m *LogServiceError) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogServiceError.Unmarshal(m, b)
+}
+func (m *LogServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogServiceError.Marshal(b, m, deterministic)
+}
+func (dst *LogServiceError) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogServiceError.Merge(dst, src)
+}
+func (m *LogServiceError) XXX_Size() int {
+ return xxx_messageInfo_LogServiceError.Size(m)
+}
+func (m *LogServiceError) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogServiceError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogServiceError proto.InternalMessageInfo
type UserAppLogLine struct {
- TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec,json=timestampUsec" json:"timestamp_usec,omitempty"`
- Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
- Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec,json=timestampUsec" json:"timestamp_usec,omitempty"`
+ Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
+ Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} }
-func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) }
-func (*UserAppLogLine) ProtoMessage() {}
-func (*UserAppLogLine) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} }
+func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) }
+func (*UserAppLogLine) ProtoMessage() {}
+func (*UserAppLogLine) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{1}
+}
+func (m *UserAppLogLine) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UserAppLogLine.Unmarshal(m, b)
+}
+func (m *UserAppLogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UserAppLogLine.Marshal(b, m, deterministic)
+}
+func (dst *UserAppLogLine) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UserAppLogLine.Merge(dst, src)
+}
+func (m *UserAppLogLine) XXX_Size() int {
+ return xxx_messageInfo_UserAppLogLine.Size(m)
+}
+func (m *UserAppLogLine) XXX_DiscardUnknown() {
+ xxx_messageInfo_UserAppLogLine.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UserAppLogLine proto.InternalMessageInfo
func (m *UserAppLogLine) GetTimestampUsec() int64 {
if m != nil && m.TimestampUsec != nil {
@@ -120,14 +142,35 @@ func (m *UserAppLogLine) GetMessage() string {
}
type UserAppLogGroup struct {
- LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line,json=logLine" json:"log_line,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line,json=logLine" json:"log_line,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} }
-func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) }
-func (*UserAppLogGroup) ProtoMessage() {}
-func (*UserAppLogGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} }
+func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) }
+func (*UserAppLogGroup) ProtoMessage() {}
+func (*UserAppLogGroup) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{2}
+}
+func (m *UserAppLogGroup) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UserAppLogGroup.Unmarshal(m, b)
+}
+func (m *UserAppLogGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UserAppLogGroup.Marshal(b, m, deterministic)
+}
+func (dst *UserAppLogGroup) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UserAppLogGroup.Merge(dst, src)
+}
+func (m *UserAppLogGroup) XXX_Size() int {
+ return xxx_messageInfo_UserAppLogGroup.Size(m)
+}
+func (m *UserAppLogGroup) XXX_DiscardUnknown() {
+ xxx_messageInfo_UserAppLogGroup.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UserAppLogGroup proto.InternalMessageInfo
func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine {
if m != nil {
@@ -137,14 +180,35 @@ func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine {
}
type FlushRequest struct {
- Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *FlushRequest) Reset() { *m = FlushRequest{} }
-func (m *FlushRequest) String() string { return proto.CompactTextString(m) }
-func (*FlushRequest) ProtoMessage() {}
-func (*FlushRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+func (m *FlushRequest) Reset() { *m = FlushRequest{} }
+func (m *FlushRequest) String() string { return proto.CompactTextString(m) }
+func (*FlushRequest) ProtoMessage() {}
+func (*FlushRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{3}
+}
+func (m *FlushRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_FlushRequest.Unmarshal(m, b)
+}
+func (m *FlushRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_FlushRequest.Marshal(b, m, deterministic)
+}
+func (dst *FlushRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FlushRequest.Merge(dst, src)
+}
+func (m *FlushRequest) XXX_Size() int {
+ return xxx_messageInfo_FlushRequest.Size(m)
+}
+func (m *FlushRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_FlushRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlushRequest proto.InternalMessageInfo
func (m *FlushRequest) GetLogs() []byte {
if m != nil {
@@ -154,14 +218,35 @@ func (m *FlushRequest) GetLogs() []byte {
}
type SetStatusRequest struct {
- Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} }
-func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) }
-func (*SetStatusRequest) ProtoMessage() {}
-func (*SetStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} }
+func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) }
+func (*SetStatusRequest) ProtoMessage() {}
+func (*SetStatusRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{4}
+}
+func (m *SetStatusRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SetStatusRequest.Unmarshal(m, b)
+}
+func (m *SetStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SetStatusRequest.Marshal(b, m, deterministic)
+}
+func (dst *SetStatusRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SetStatusRequest.Merge(dst, src)
+}
+func (m *SetStatusRequest) XXX_Size() int {
+ return xxx_messageInfo_SetStatusRequest.Size(m)
+}
+func (m *SetStatusRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SetStatusRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetStatusRequest proto.InternalMessageInfo
func (m *SetStatusRequest) GetStatus() string {
if m != nil && m.Status != nil {
@@ -171,14 +256,35 @@ func (m *SetStatusRequest) GetStatus() string {
}
type LogOffset struct {
- RequestId []byte `protobuf:"bytes,1,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ RequestId []byte `protobuf:"bytes,1,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *LogOffset) Reset() { *m = LogOffset{} }
-func (m *LogOffset) String() string { return proto.CompactTextString(m) }
-func (*LogOffset) ProtoMessage() {}
-func (*LogOffset) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+func (m *LogOffset) Reset() { *m = LogOffset{} }
+func (m *LogOffset) String() string { return proto.CompactTextString(m) }
+func (*LogOffset) ProtoMessage() {}
+func (*LogOffset) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{5}
+}
+func (m *LogOffset) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogOffset.Unmarshal(m, b)
+}
+func (m *LogOffset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogOffset.Marshal(b, m, deterministic)
+}
+func (dst *LogOffset) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogOffset.Merge(dst, src)
+}
+func (m *LogOffset) XXX_Size() int {
+ return xxx_messageInfo_LogOffset.Size(m)
+}
+func (m *LogOffset) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogOffset.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogOffset proto.InternalMessageInfo
func (m *LogOffset) GetRequestId() []byte {
if m != nil {
@@ -188,16 +294,37 @@ func (m *LogOffset) GetRequestId() []byte {
}
type LogLine struct {
- Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"`
- Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
- LogMessage *string `protobuf:"bytes,3,req,name=log_message,json=logMessage" json:"log_message,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"`
+ Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
+ LogMessage *string `protobuf:"bytes,3,req,name=log_message,json=logMessage" json:"log_message,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *LogLine) Reset() { *m = LogLine{} }
-func (m *LogLine) String() string { return proto.CompactTextString(m) }
-func (*LogLine) ProtoMessage() {}
-func (*LogLine) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (m *LogLine) Reset() { *m = LogLine{} }
+func (m *LogLine) String() string { return proto.CompactTextString(m) }
+func (*LogLine) ProtoMessage() {}
+func (*LogLine) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{6}
+}
+func (m *LogLine) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogLine.Unmarshal(m, b)
+}
+func (m *LogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogLine.Marshal(b, m, deterministic)
+}
+func (dst *LogLine) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogLine.Merge(dst, src)
+}
+func (m *LogLine) XXX_Size() int {
+ return xxx_messageInfo_LogLine.Size(m)
+}
+func (m *LogLine) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogLine.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogLine proto.InternalMessageInfo
func (m *LogLine) GetTime() int64 {
if m != nil && m.Time != nil {
@@ -259,13 +386,34 @@ type RequestLog struct {
WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests,json=wasThrottledForRequests" json:"was_throttled_for_requests,omitempty"`
ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"`
ServerName []byte `protobuf:"bytes,34,opt,name=server_name,json=serverName" json:"server_name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *RequestLog) Reset() { *m = RequestLog{} }
-func (m *RequestLog) String() string { return proto.CompactTextString(m) }
-func (*RequestLog) ProtoMessage() {}
-func (*RequestLog) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (m *RequestLog) Reset() { *m = RequestLog{} }
+func (m *RequestLog) String() string { return proto.CompactTextString(m) }
+func (*RequestLog) ProtoMessage() {}
+func (*RequestLog) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{7}
+}
+func (m *RequestLog) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_RequestLog.Unmarshal(m, b)
+}
+func (m *RequestLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_RequestLog.Marshal(b, m, deterministic)
+}
+func (dst *RequestLog) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RequestLog.Merge(dst, src)
+}
+func (m *RequestLog) XXX_Size() int {
+ return xxx_messageInfo_RequestLog.Size(m)
+}
+func (m *RequestLog) XXX_DiscardUnknown() {
+ xxx_messageInfo_RequestLog.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RequestLog proto.InternalMessageInfo
const Default_RequestLog_ModuleId string = "default"
const Default_RequestLog_ReplicaIndex int32 = -1
@@ -538,15 +686,36 @@ func (m *RequestLog) GetServerName() []byte {
}
type LogModuleVersion struct {
- ModuleId *string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"`
- VersionId *string `protobuf:"bytes,2,opt,name=version_id,json=versionId" json:"version_id,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ ModuleId *string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"`
+ VersionId *string `protobuf:"bytes,2,opt,name=version_id,json=versionId" json:"version_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} }
-func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) }
-func (*LogModuleVersion) ProtoMessage() {}
-func (*LogModuleVersion) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} }
+func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) }
+func (*LogModuleVersion) ProtoMessage() {}
+func (*LogModuleVersion) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{8}
+}
+func (m *LogModuleVersion) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogModuleVersion.Unmarshal(m, b)
+}
+func (m *LogModuleVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogModuleVersion.Marshal(b, m, deterministic)
+}
+func (dst *LogModuleVersion) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogModuleVersion.Merge(dst, src)
+}
+func (m *LogModuleVersion) XXX_Size() int {
+ return xxx_messageInfo_LogModuleVersion.Size(m)
+}
+func (m *LogModuleVersion) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogModuleVersion.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogModuleVersion proto.InternalMessageInfo
const Default_LogModuleVersion_ModuleId string = "default"
@@ -565,32 +734,53 @@ func (m *LogModuleVersion) GetVersionId() string {
}
type LogReadRequest struct {
- AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
- VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"`
- ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version,json=moduleVersion" json:"module_version,omitempty"`
- StartTime *int64 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
- EndTime *int64 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
- Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"`
- RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id,json=requestId" json:"request_id,omitempty"`
- MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level,json=minimumLogLevel" json:"minimum_log_level,omitempty"`
- IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete,json=includeIncomplete" json:"include_incomplete,omitempty"`
- Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"`
- CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex,json=combinedLogRegex" json:"combined_log_regex,omitempty"`
- HostRegex *string `protobuf:"bytes,15,opt,name=host_regex,json=hostRegex" json:"host_regex,omitempty"`
- ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index,json=replicaIndex" json:"replica_index,omitempty"`
- IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs,json=includeAppLogs" json:"include_app_logs,omitempty"`
- AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request,json=appLogsPerRequest" json:"app_logs_per_request,omitempty"`
- IncludeHost *bool `protobuf:"varint,11,opt,name=include_host,json=includeHost" json:"include_host,omitempty"`
- IncludeAll *bool `protobuf:"varint,12,opt,name=include_all,json=includeAll" json:"include_all,omitempty"`
- CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator,json=cacheIterator" json:"cache_iterator,omitempty"`
- NumShards *int32 `protobuf:"varint,18,opt,name=num_shards,json=numShards" json:"num_shards,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
+ VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"`
+ ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version,json=moduleVersion" json:"module_version,omitempty"`
+ StartTime *int64 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
+ EndTime *int64 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"`
+ RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id,json=requestId" json:"request_id,omitempty"`
+ MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level,json=minimumLogLevel" json:"minimum_log_level,omitempty"`
+ IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete,json=includeIncomplete" json:"include_incomplete,omitempty"`
+ Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"`
+ CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex,json=combinedLogRegex" json:"combined_log_regex,omitempty"`
+ HostRegex *string `protobuf:"bytes,15,opt,name=host_regex,json=hostRegex" json:"host_regex,omitempty"`
+ ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index,json=replicaIndex" json:"replica_index,omitempty"`
+ IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs,json=includeAppLogs" json:"include_app_logs,omitempty"`
+ AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request,json=appLogsPerRequest" json:"app_logs_per_request,omitempty"`
+ IncludeHost *bool `protobuf:"varint,11,opt,name=include_host,json=includeHost" json:"include_host,omitempty"`
+ IncludeAll *bool `protobuf:"varint,12,opt,name=include_all,json=includeAll" json:"include_all,omitempty"`
+ CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator,json=cacheIterator" json:"cache_iterator,omitempty"`
+ NumShards *int32 `protobuf:"varint,18,opt,name=num_shards,json=numShards" json:"num_shards,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *LogReadRequest) Reset() { *m = LogReadRequest{} }
-func (m *LogReadRequest) String() string { return proto.CompactTextString(m) }
-func (*LogReadRequest) ProtoMessage() {}
-func (*LogReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+func (m *LogReadRequest) Reset() { *m = LogReadRequest{} }
+func (m *LogReadRequest) String() string { return proto.CompactTextString(m) }
+func (*LogReadRequest) ProtoMessage() {}
+func (*LogReadRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{9}
+}
+func (m *LogReadRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogReadRequest.Unmarshal(m, b)
+}
+func (m *LogReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogReadRequest.Marshal(b, m, deterministic)
+}
+func (dst *LogReadRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogReadRequest.Merge(dst, src)
+}
+func (m *LogReadRequest) XXX_Size() int {
+ return xxx_messageInfo_LogReadRequest.Size(m)
+}
+func (m *LogReadRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogReadRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogReadRequest proto.InternalMessageInfo
func (m *LogReadRequest) GetAppId() string {
if m != nil && m.AppId != nil {
@@ -726,16 +916,37 @@ func (m *LogReadRequest) GetNumShards() int32 {
}
type LogReadResponse struct {
- Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"`
- Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"`
- LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time,json=lastEndTime" json:"last_end_time,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"`
+ LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time,json=lastEndTime" json:"last_end_time,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *LogReadResponse) Reset() { *m = LogReadResponse{} }
-func (m *LogReadResponse) String() string { return proto.CompactTextString(m) }
-func (*LogReadResponse) ProtoMessage() {}
-func (*LogReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+func (m *LogReadResponse) Reset() { *m = LogReadResponse{} }
+func (m *LogReadResponse) String() string { return proto.CompactTextString(m) }
+func (*LogReadResponse) ProtoMessage() {}
+func (*LogReadResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{10}
+}
+func (m *LogReadResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogReadResponse.Unmarshal(m, b)
+}
+func (m *LogReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogReadResponse.Marshal(b, m, deterministic)
+}
+func (dst *LogReadResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogReadResponse.Merge(dst, src)
+}
+func (m *LogReadResponse) XXX_Size() int {
+ return xxx_messageInfo_LogReadResponse.Size(m)
+}
+func (m *LogReadResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogReadResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogReadResponse proto.InternalMessageInfo
func (m *LogReadResponse) GetLog() []*RequestLog {
if m != nil {
@@ -759,19 +970,40 @@ func (m *LogReadResponse) GetLastEndTime() int64 {
}
type LogUsageRecord struct {
- VersionId *string `protobuf:"bytes,1,opt,name=version_id,json=versionId" json:"version_id,omitempty"`
- StartTime *int32 `protobuf:"varint,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
- EndTime *int32 `protobuf:"varint,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
- Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
- TotalSize *int64 `protobuf:"varint,5,opt,name=total_size,json=totalSize" json:"total_size,omitempty"`
- Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ VersionId *string `protobuf:"bytes,1,opt,name=version_id,json=versionId" json:"version_id,omitempty"`
+ StartTime *int32 `protobuf:"varint,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
+ EndTime *int32 `protobuf:"varint,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
+ Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
+ TotalSize *int64 `protobuf:"varint,5,opt,name=total_size,json=totalSize" json:"total_size,omitempty"`
+ Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} }
-func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) }
-func (*LogUsageRecord) ProtoMessage() {}
-func (*LogUsageRecord) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} }
+func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) }
+func (*LogUsageRecord) ProtoMessage() {}
+func (*LogUsageRecord) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{11}
+}
+func (m *LogUsageRecord) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogUsageRecord.Unmarshal(m, b)
+}
+func (m *LogUsageRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogUsageRecord.Marshal(b, m, deterministic)
+}
+func (dst *LogUsageRecord) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogUsageRecord.Merge(dst, src)
+}
+func (m *LogUsageRecord) XXX_Size() int {
+ return xxx_messageInfo_LogUsageRecord.Size(m)
+}
+func (m *LogUsageRecord) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogUsageRecord.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogUsageRecord proto.InternalMessageInfo
func (m *LogUsageRecord) GetVersionId() string {
if m != nil && m.VersionId != nil {
@@ -816,21 +1048,42 @@ func (m *LogUsageRecord) GetRecords() int32 {
}
type LogUsageRequest struct {
- AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
- VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"`
- StartTime *int32 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
- EndTime *int32 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
- ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,json=resolutionHours,def=1" json:"resolution_hours,omitempty"`
- CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions,json=combineVersions" json:"combine_versions,omitempty"`
- UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version,json=usageVersion" json:"usage_version,omitempty"`
- VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only,json=versionsOnly" json:"versions_only,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
+ VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"`
+ StartTime *int32 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
+ EndTime *int32 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
+ ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,json=resolutionHours,def=1" json:"resolution_hours,omitempty"`
+ CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions,json=combineVersions" json:"combine_versions,omitempty"`
+ UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version,json=usageVersion" json:"usage_version,omitempty"`
+ VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only,json=versionsOnly" json:"versions_only,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} }
-func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) }
-func (*LogUsageRequest) ProtoMessage() {}
-func (*LogUsageRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} }
+func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) }
+func (*LogUsageRequest) ProtoMessage() {}
+func (*LogUsageRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{12}
+}
+func (m *LogUsageRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogUsageRequest.Unmarshal(m, b)
+}
+func (m *LogUsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogUsageRequest.Marshal(b, m, deterministic)
+}
+func (dst *LogUsageRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogUsageRequest.Merge(dst, src)
+}
+func (m *LogUsageRequest) XXX_Size() int {
+ return xxx_messageInfo_LogUsageRequest.Size(m)
+}
+func (m *LogUsageRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogUsageRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogUsageRequest proto.InternalMessageInfo
const Default_LogUsageRequest_ResolutionHours uint32 = 1
@@ -891,15 +1144,36 @@ func (m *LogUsageRequest) GetVersionsOnly() bool {
}
type LogUsageResponse struct {
- Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"`
- Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"`
+ Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} }
-func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) }
-func (*LogUsageResponse) ProtoMessage() {}
-func (*LogUsageResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} }
+func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) }
+func (*LogUsageResponse) ProtoMessage() {}
+func (*LogUsageResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{13}
+}
+func (m *LogUsageResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogUsageResponse.Unmarshal(m, b)
+}
+func (m *LogUsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogUsageResponse.Marshal(b, m, deterministic)
+}
+func (dst *LogUsageResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogUsageResponse.Merge(dst, src)
+}
+func (m *LogUsageResponse) XXX_Size() int {
+ return xxx_messageInfo_LogUsageResponse.Size(m)
+}
+func (m *LogUsageResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogUsageResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogUsageResponse proto.InternalMessageInfo
func (m *LogUsageResponse) GetUsage() []*LogUsageRecord {
if m != nil {
@@ -933,10 +1207,10 @@ func init() {
}
func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/log/log_service.proto", fileDescriptor0)
+ proto.RegisterFile("google.golang.org/appengine/internal/log/log_service.proto", fileDescriptor_log_service_f054fd4b5012319d)
}
-var fileDescriptor0 = []byte{
+var fileDescriptor_log_service_f054fd4b5012319d = []byte{
// 1553 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x72, 0xdb, 0xc6,
0x15, 0x2e, 0x48, 0x51, 0x24, 0x0f, 0x49, 0x91, 0x5a, 0xcb, 0xce, 0xda, 0xae, 0x6b, 0x1a, 0x4e,
diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go
index 9cc1f71d1..c4ba63bb4 100644
--- a/vendor/google.golang.org/appengine/internal/metadata.go
+++ b/vendor/google.golang.org/appengine/internal/metadata.go
@@ -12,7 +12,6 @@ package internal
import (
"fmt"
"io/ioutil"
- "log"
"net/http"
"net/url"
)
@@ -32,7 +31,7 @@ var (
func mustGetMetadata(key string) []byte {
b, err := getMetadata(key)
if err != nil {
- log.Fatalf("Metadata fetch failed: %v", err)
+ panic(fmt.Sprintf("Metadata fetch failed for '%s': %v", key, err))
}
return b
}
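
The metadata.go hunk above swaps log.Fatalf for a panic that names the failing key, so a failed metadata fetch no longer terminates the process unconditionally and can, if a caller chooses, be intercepted with recover. Below is a standalone sketch of that difference under an assumed must-style helper; mustGetValue and safeGet are illustrative names, not part of the vendored package.

package main

import "fmt"

// mustGetValue stands in for a helper that panics on failure,
// in the style of mustGetMetadata after this change.
func mustGetValue(key string) []byte {
	panic(fmt.Sprintf("Metadata fetch failed for '%s': simulated error", key))
}

// safeGet converts such a panic back into an ordinary error.
func safeGet(key string) (b []byte, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("%v", r)
		}
	}()
	return mustGetValue(key), nil
}

func main() {
	if _, err := safeGet("instance/zone"); err != nil {
		fmt.Println("handled:", err)
	}
}
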
diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
index 518005254..ddfc0c04a 100644
--- a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
+++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
@@ -1,31 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google.golang.org/appengine/internal/modules/modules_service.proto
-/*
-Package modules is a generated protocol buffer package.
-
-It is generated from these files:
- google.golang.org/appengine/internal/modules/modules_service.proto
-
-It has these top-level messages:
- ModulesServiceError
- GetModulesRequest
- GetModulesResponse
- GetVersionsRequest
- GetVersionsResponse
- GetDefaultVersionRequest
- GetDefaultVersionResponse
- GetNumInstancesRequest
- GetNumInstancesResponse
- SetNumInstancesRequest
- SetNumInstancesResponse
- StartModuleRequest
- StartModuleResponse
- StopModuleRequest
- StopModuleResponse
- GetHostnameRequest
- GetHostnameResponse
-*/
package modules
import proto "github.com/golang/protobuf/proto"
@@ -88,36 +63,99 @@ func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
return nil
}
func (ModulesServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor0, []int{0, 0}
+ return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0, 0}
}
type ModulesServiceError struct {
- XXX_unrecognized []byte `json:"-"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} }
-func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) }
-func (*ModulesServiceError) ProtoMessage() {}
-func (*ModulesServiceError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} }
+func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) }
+func (*ModulesServiceError) ProtoMessage() {}
+func (*ModulesServiceError) Descriptor() ([]byte, []int) {
+ return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0}
+}
+func (m *ModulesServiceError) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ModulesServiceError.Unmarshal(m, b)
+}
+func (m *ModulesServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ModulesServiceError.Marshal(b, m, deterministic)
+}
+func (dst *ModulesServiceError) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ModulesServiceError.Merge(dst, src)
+}
+func (m *ModulesServiceError) XXX_Size() int {
+ return xxx_messageInfo_ModulesServiceError.Size(m)
+}
+func (m *ModulesServiceError) XXX_DiscardUnknown() {
+ xxx_messageInfo_ModulesServiceError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ModulesServiceError proto.InternalMessageInfo
type GetModulesRequest struct {
- XXX_unrecognized []byte `json:"-"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} }
-func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) }
-func (*GetModulesRequest) ProtoMessage() {}
-func (*GetModulesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} }
+func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) }
+func (*GetModulesRequest) ProtoMessage() {}
+func (*GetModulesRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{1}
+}
+func (m *GetModulesRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetModulesRequest.Unmarshal(m, b)
+}
+func (m *GetModulesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetModulesRequest.Marshal(b, m, deterministic)
+}
+func (dst *GetModulesRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetModulesRequest.Merge(dst, src)
+}
+func (m *GetModulesRequest) XXX_Size() int {
+ return xxx_messageInfo_GetModulesRequest.Size(m)
+}
+func (m *GetModulesRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetModulesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetModulesRequest proto.InternalMessageInfo
type GetModulesResponse struct {
- Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} }
-func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) }
-func (*GetModulesResponse) ProtoMessage() {}
-func (*GetModulesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} }
+func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) }
+func (*GetModulesResponse) ProtoMessage() {}
+func (*GetModulesResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{2}
+}
+func (m *GetModulesResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetModulesResponse.Unmarshal(m, b)
+}
+func (m *GetModulesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetModulesResponse.Marshal(b, m, deterministic)
+}
+func (dst *GetModulesResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetModulesResponse.Merge(dst, src)
+}
+func (m *GetModulesResponse) XXX_Size() int {
+ return xxx_messageInfo_GetModulesResponse.Size(m)
+}
+func (m *GetModulesResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetModulesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetModulesResponse proto.InternalMessageInfo
func (m *GetModulesResponse) GetModule() []string {
if m != nil {
@@ -127,14 +165,35 @@ func (m *GetModulesResponse) GetModule() []string {
}
type GetVersionsRequest struct {
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} }
-func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) }
-func (*GetVersionsRequest) ProtoMessage() {}
-func (*GetVersionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} }
+func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) }
+func (*GetVersionsRequest) ProtoMessage() {}
+func (*GetVersionsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{3}
+}
+func (m *GetVersionsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetVersionsRequest.Unmarshal(m, b)
+}
+func (m *GetVersionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetVersionsRequest.Marshal(b, m, deterministic)
+}
+func (dst *GetVersionsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetVersionsRequest.Merge(dst, src)
+}
+func (m *GetVersionsRequest) XXX_Size() int {
+ return xxx_messageInfo_GetVersionsRequest.Size(m)
+}
+func (m *GetVersionsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetVersionsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetVersionsRequest proto.InternalMessageInfo
func (m *GetVersionsRequest) GetModule() string {
if m != nil && m.Module != nil {
@@ -144,14 +203,35 @@ func (m *GetVersionsRequest) GetModule() string {
}
type GetVersionsResponse struct {
- Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} }
-func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) }
-func (*GetVersionsResponse) ProtoMessage() {}
-func (*GetVersionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} }
+func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) }
+func (*GetVersionsResponse) ProtoMessage() {}
+func (*GetVersionsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{4}
+}
+func (m *GetVersionsResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetVersionsResponse.Unmarshal(m, b)
+}
+func (m *GetVersionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetVersionsResponse.Marshal(b, m, deterministic)
+}
+func (dst *GetVersionsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetVersionsResponse.Merge(dst, src)
+}
+func (m *GetVersionsResponse) XXX_Size() int {
+ return xxx_messageInfo_GetVersionsResponse.Size(m)
+}
+func (m *GetVersionsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetVersionsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetVersionsResponse proto.InternalMessageInfo
func (m *GetVersionsResponse) GetVersion() []string {
if m != nil {
@@ -161,14 +241,35 @@ func (m *GetVersionsResponse) GetVersion() []string {
}
type GetDefaultVersionRequest struct {
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} }
-func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) }
-func (*GetDefaultVersionRequest) ProtoMessage() {}
-func (*GetDefaultVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} }
+func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultVersionRequest) ProtoMessage() {}
+func (*GetDefaultVersionRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{5}
+}
+func (m *GetDefaultVersionRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetDefaultVersionRequest.Unmarshal(m, b)
+}
+func (m *GetDefaultVersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetDefaultVersionRequest.Marshal(b, m, deterministic)
+}
+func (dst *GetDefaultVersionRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetDefaultVersionRequest.Merge(dst, src)
+}
+func (m *GetDefaultVersionRequest) XXX_Size() int {
+ return xxx_messageInfo_GetDefaultVersionRequest.Size(m)
+}
+func (m *GetDefaultVersionRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetDefaultVersionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetDefaultVersionRequest proto.InternalMessageInfo
func (m *GetDefaultVersionRequest) GetModule() string {
if m != nil && m.Module != nil {
@@ -178,14 +279,35 @@ func (m *GetDefaultVersionRequest) GetModule() string {
}
type GetDefaultVersionResponse struct {
- Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} }
-func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) }
-func (*GetDefaultVersionResponse) ProtoMessage() {}
-func (*GetDefaultVersionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} }
+func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultVersionResponse) ProtoMessage() {}
+func (*GetDefaultVersionResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{6}
+}
+func (m *GetDefaultVersionResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetDefaultVersionResponse.Unmarshal(m, b)
+}
+func (m *GetDefaultVersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetDefaultVersionResponse.Marshal(b, m, deterministic)
+}
+func (dst *GetDefaultVersionResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetDefaultVersionResponse.Merge(dst, src)
+}
+func (m *GetDefaultVersionResponse) XXX_Size() int {
+ return xxx_messageInfo_GetDefaultVersionResponse.Size(m)
+}
+func (m *GetDefaultVersionResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetDefaultVersionResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetDefaultVersionResponse proto.InternalMessageInfo
func (m *GetDefaultVersionResponse) GetVersion() string {
if m != nil && m.Version != nil {
@@ -195,15 +317,36 @@ func (m *GetDefaultVersionResponse) GetVersion() string {
}
type GetNumInstancesRequest struct {
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
- Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} }
-func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
-func (*GetNumInstancesRequest) ProtoMessage() {}
-func (*GetNumInstancesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} }
+func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
+func (*GetNumInstancesRequest) ProtoMessage() {}
+func (*GetNumInstancesRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{7}
+}
+func (m *GetNumInstancesRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetNumInstancesRequest.Unmarshal(m, b)
+}
+func (m *GetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetNumInstancesRequest.Marshal(b, m, deterministic)
+}
+func (dst *GetNumInstancesRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetNumInstancesRequest.Merge(dst, src)
+}
+func (m *GetNumInstancesRequest) XXX_Size() int {
+ return xxx_messageInfo_GetNumInstancesRequest.Size(m)
+}
+func (m *GetNumInstancesRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetNumInstancesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetNumInstancesRequest proto.InternalMessageInfo
func (m *GetNumInstancesRequest) GetModule() string {
if m != nil && m.Module != nil {
@@ -220,14 +363,35 @@ func (m *GetNumInstancesRequest) GetVersion() string {
}
type GetNumInstancesResponse struct {
- Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} }
-func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
-func (*GetNumInstancesResponse) ProtoMessage() {}
-func (*GetNumInstancesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} }
+func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
+func (*GetNumInstancesResponse) ProtoMessage() {}
+func (*GetNumInstancesResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{8}
+}
+func (m *GetNumInstancesResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetNumInstancesResponse.Unmarshal(m, b)
+}
+func (m *GetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetNumInstancesResponse.Marshal(b, m, deterministic)
+}
+func (dst *GetNumInstancesResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetNumInstancesResponse.Merge(dst, src)
+}
+func (m *GetNumInstancesResponse) XXX_Size() int {
+ return xxx_messageInfo_GetNumInstancesResponse.Size(m)
+}
+func (m *GetNumInstancesResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetNumInstancesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetNumInstancesResponse proto.InternalMessageInfo
func (m *GetNumInstancesResponse) GetInstances() int64 {
if m != nil && m.Instances != nil {
@@ -237,16 +401,37 @@ func (m *GetNumInstancesResponse) GetInstances() int64 {
}
type SetNumInstancesRequest struct {
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
- Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
- Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} }
-func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
-func (*SetNumInstancesRequest) ProtoMessage() {}
-func (*SetNumInstancesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} }
+func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
+func (*SetNumInstancesRequest) ProtoMessage() {}
+func (*SetNumInstancesRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{9}
+}
+func (m *SetNumInstancesRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SetNumInstancesRequest.Unmarshal(m, b)
+}
+func (m *SetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SetNumInstancesRequest.Marshal(b, m, deterministic)
+}
+func (dst *SetNumInstancesRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SetNumInstancesRequest.Merge(dst, src)
+}
+func (m *SetNumInstancesRequest) XXX_Size() int {
+ return xxx_messageInfo_SetNumInstancesRequest.Size(m)
+}
+func (m *SetNumInstancesRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SetNumInstancesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetNumInstancesRequest proto.InternalMessageInfo
func (m *SetNumInstancesRequest) GetModule() string {
if m != nil && m.Module != nil {
@@ -270,24 +455,66 @@ func (m *SetNumInstancesRequest) GetInstances() int64 {
}
type SetNumInstancesResponse struct {
- XXX_unrecognized []byte `json:"-"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} }
-func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
-func (*SetNumInstancesResponse) ProtoMessage() {}
-func (*SetNumInstancesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} }
+func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
+func (*SetNumInstancesResponse) ProtoMessage() {}
+func (*SetNumInstancesResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{10}
+}
+func (m *SetNumInstancesResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SetNumInstancesResponse.Unmarshal(m, b)
+}
+func (m *SetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SetNumInstancesResponse.Marshal(b, m, deterministic)
+}
+func (dst *SetNumInstancesResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SetNumInstancesResponse.Merge(dst, src)
+}
+func (m *SetNumInstancesResponse) XXX_Size() int {
+ return xxx_messageInfo_SetNumInstancesResponse.Size(m)
+}
+func (m *SetNumInstancesResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_SetNumInstancesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetNumInstancesResponse proto.InternalMessageInfo
type StartModuleRequest struct {
- Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"`
- Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} }
-func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) }
-func (*StartModuleRequest) ProtoMessage() {}
-func (*StartModuleRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} }
+func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) }
+func (*StartModuleRequest) ProtoMessage() {}
+func (*StartModuleRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{11}
+}
+func (m *StartModuleRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StartModuleRequest.Unmarshal(m, b)
+}
+func (m *StartModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StartModuleRequest.Marshal(b, m, deterministic)
+}
+func (dst *StartModuleRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StartModuleRequest.Merge(dst, src)
+}
+func (m *StartModuleRequest) XXX_Size() int {
+ return xxx_messageInfo_StartModuleRequest.Size(m)
+}
+func (m *StartModuleRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_StartModuleRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StartModuleRequest proto.InternalMessageInfo
func (m *StartModuleRequest) GetModule() string {
if m != nil && m.Module != nil {
@@ -304,24 +531,66 @@ func (m *StartModuleRequest) GetVersion() string {
}
type StartModuleResponse struct {
- XXX_unrecognized []byte `json:"-"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} }
-func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) }
-func (*StartModuleResponse) ProtoMessage() {}
-func (*StartModuleResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} }
+func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) }
+func (*StartModuleResponse) ProtoMessage() {}
+func (*StartModuleResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{12}
+}
+func (m *StartModuleResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StartModuleResponse.Unmarshal(m, b)
+}
+func (m *StartModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StartModuleResponse.Marshal(b, m, deterministic)
+}
+func (dst *StartModuleResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StartModuleResponse.Merge(dst, src)
+}
+func (m *StartModuleResponse) XXX_Size() int {
+ return xxx_messageInfo_StartModuleResponse.Size(m)
+}
+func (m *StartModuleResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_StartModuleResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StartModuleResponse proto.InternalMessageInfo
type StopModuleRequest struct {
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
- Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} }
-func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) }
-func (*StopModuleRequest) ProtoMessage() {}
-func (*StopModuleRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} }
+func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) }
+func (*StopModuleRequest) ProtoMessage() {}
+func (*StopModuleRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{13}
+}
+func (m *StopModuleRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StopModuleRequest.Unmarshal(m, b)
+}
+func (m *StopModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StopModuleRequest.Marshal(b, m, deterministic)
+}
+func (dst *StopModuleRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StopModuleRequest.Merge(dst, src)
+}
+func (m *StopModuleRequest) XXX_Size() int {
+ return xxx_messageInfo_StopModuleRequest.Size(m)
+}
+func (m *StopModuleRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_StopModuleRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StopModuleRequest proto.InternalMessageInfo
func (m *StopModuleRequest) GetModule() string {
if m != nil && m.Module != nil {
@@ -338,25 +607,67 @@ func (m *StopModuleRequest) GetVersion() string {
}
type StopModuleResponse struct {
- XXX_unrecognized []byte `json:"-"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} }
-func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) }
-func (*StopModuleResponse) ProtoMessage() {}
-func (*StopModuleResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} }
+func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) }
+func (*StopModuleResponse) ProtoMessage() {}
+func (*StopModuleResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{14}
+}
+func (m *StopModuleResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StopModuleResponse.Unmarshal(m, b)
+}
+func (m *StopModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StopModuleResponse.Marshal(b, m, deterministic)
+}
+func (dst *StopModuleResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StopModuleResponse.Merge(dst, src)
+}
+func (m *StopModuleResponse) XXX_Size() int {
+ return xxx_messageInfo_StopModuleResponse.Size(m)
+}
+func (m *StopModuleResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_StopModuleResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StopModuleResponse proto.InternalMessageInfo
type GetHostnameRequest struct {
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
- Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
- Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} }
-func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) }
-func (*GetHostnameRequest) ProtoMessage() {}
-func (*GetHostnameRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} }
+func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetHostnameRequest) ProtoMessage() {}
+func (*GetHostnameRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{15}
+}
+func (m *GetHostnameRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetHostnameRequest.Unmarshal(m, b)
+}
+func (m *GetHostnameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetHostnameRequest.Marshal(b, m, deterministic)
+}
+func (dst *GetHostnameRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetHostnameRequest.Merge(dst, src)
+}
+func (m *GetHostnameRequest) XXX_Size() int {
+ return xxx_messageInfo_GetHostnameRequest.Size(m)
+}
+func (m *GetHostnameRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetHostnameRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetHostnameRequest proto.InternalMessageInfo
func (m *GetHostnameRequest) GetModule() string {
if m != nil && m.Module != nil {
@@ -380,14 +691,35 @@ func (m *GetHostnameRequest) GetInstance() string {
}
type GetHostnameResponse struct {
- Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} }
-func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) }
-func (*GetHostnameResponse) ProtoMessage() {}
-func (*GetHostnameResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} }
+func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetHostnameResponse) ProtoMessage() {}
+func (*GetHostnameResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{16}
+}
+func (m *GetHostnameResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetHostnameResponse.Unmarshal(m, b)
+}
+func (m *GetHostnameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetHostnameResponse.Marshal(b, m, deterministic)
+}
+func (dst *GetHostnameResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetHostnameResponse.Merge(dst, src)
+}
+func (m *GetHostnameResponse) XXX_Size() int {
+ return xxx_messageInfo_GetHostnameResponse.Size(m)
+}
+func (m *GetHostnameResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetHostnameResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetHostnameResponse proto.InternalMessageInfo
func (m *GetHostnameResponse) GetHostname() string {
if m != nil && m.Hostname != nil {
@@ -417,10 +749,10 @@ func init() {
}
func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/modules/modules_service.proto", fileDescriptor0)
+ proto.RegisterFile("google.golang.org/appengine/internal/modules/modules_service.proto", fileDescriptor_modules_service_9cd3bffe4e91c59a)
}
-var fileDescriptor0 = []byte{
+var fileDescriptor_modules_service_9cd3bffe4e91c59a = []byte{
// 457 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xc1, 0x6f, 0xd3, 0x30,
0x14, 0xc6, 0x69, 0x02, 0xdb, 0xf2, 0x0e, 0x90, 0x3a, 0x5b, 0xd7, 0x4d, 0x1c, 0x50, 0x4e, 0x1c,
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
index 172aebe8e..8d782a38e 100644
--- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
@@ -1,18 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
-/*
-Package remote_api is a generated protocol buffer package.
-
-It is generated from these files:
- google.golang.org/appengine/internal/remote_api/remote_api.proto
-
-It has these top-level messages:
- Request
- ApplicationError
- RpcError
- Response
-*/
package remote_api
import proto "github.com/golang/protobuf/proto"
@@ -95,20 +83,43 @@ func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
*x = RpcError_ErrorCode(value)
return nil
}
-func (RpcError_ErrorCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
-
-type Request struct {
- ServiceName *string `protobuf:"bytes,2,req,name=service_name,json=serviceName" json:"service_name,omitempty"`
- Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
- Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
- RequestId *string `protobuf:"bytes,5,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+func (RpcError_ErrorCode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_remote_api_1978114ec33a273d, []int{2, 0}
}
-func (m *Request) Reset() { *m = Request{} }
-func (m *Request) String() string { return proto.CompactTextString(m) }
-func (*Request) ProtoMessage() {}
-func (*Request) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+type Request struct {
+ ServiceName *string `protobuf:"bytes,2,req,name=service_name,json=serviceName" json:"service_name,omitempty"`
+ Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
+ Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
+ RequestId *string `protobuf:"bytes,5,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Request) Reset() { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage() {}
+func (*Request) Descriptor() ([]byte, []int) {
+ return fileDescriptor_remote_api_1978114ec33a273d, []int{0}
+}
+func (m *Request) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Request.Unmarshal(m, b)
+}
+func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Request.Marshal(b, m, deterministic)
+}
+func (dst *Request) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Request.Merge(dst, src)
+}
+func (m *Request) XXX_Size() int {
+ return xxx_messageInfo_Request.Size(m)
+}
+func (m *Request) XXX_DiscardUnknown() {
+ xxx_messageInfo_Request.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Request proto.InternalMessageInfo
func (m *Request) GetServiceName() string {
if m != nil && m.ServiceName != nil {
@@ -139,15 +150,36 @@ func (m *Request) GetRequestId() string {
}
type ApplicationError struct {
- Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
- Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+ Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *ApplicationError) Reset() { *m = ApplicationError{} }
-func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
-func (*ApplicationError) ProtoMessage() {}
-func (*ApplicationError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (m *ApplicationError) Reset() { *m = ApplicationError{} }
+func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
+func (*ApplicationError) ProtoMessage() {}
+func (*ApplicationError) Descriptor() ([]byte, []int) {
+ return fileDescriptor_remote_api_1978114ec33a273d, []int{1}
+}
+func (m *ApplicationError) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ApplicationError.Unmarshal(m, b)
+}
+func (m *ApplicationError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ApplicationError.Marshal(b, m, deterministic)
+}
+func (dst *ApplicationError) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ApplicationError.Merge(dst, src)
+}
+func (m *ApplicationError) XXX_Size() int {
+ return xxx_messageInfo_ApplicationError.Size(m)
+}
+func (m *ApplicationError) XXX_DiscardUnknown() {
+ xxx_messageInfo_ApplicationError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ApplicationError proto.InternalMessageInfo
func (m *ApplicationError) GetCode() int32 {
if m != nil && m.Code != nil {
@@ -164,15 +196,36 @@ func (m *ApplicationError) GetDetail() string {
}
type RpcError struct {
- Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
- Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+ Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *RpcError) Reset() { *m = RpcError{} }
-func (m *RpcError) String() string { return proto.CompactTextString(m) }
-func (*RpcError) ProtoMessage() {}
-func (*RpcError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (m *RpcError) Reset() { *m = RpcError{} }
+func (m *RpcError) String() string { return proto.CompactTextString(m) }
+func (*RpcError) ProtoMessage() {}
+func (*RpcError) Descriptor() ([]byte, []int) {
+ return fileDescriptor_remote_api_1978114ec33a273d, []int{2}
+}
+func (m *RpcError) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_RpcError.Unmarshal(m, b)
+}
+func (m *RpcError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_RpcError.Marshal(b, m, deterministic)
+}
+func (dst *RpcError) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RpcError.Merge(dst, src)
+}
+func (m *RpcError) XXX_Size() int {
+ return xxx_messageInfo_RpcError.Size(m)
+}
+func (m *RpcError) XXX_DiscardUnknown() {
+ xxx_messageInfo_RpcError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RpcError proto.InternalMessageInfo
func (m *RpcError) GetCode() int32 {
if m != nil && m.Code != nil {
@@ -189,18 +242,39 @@ func (m *RpcError) GetDetail() string {
}
type Response struct {
- Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
- Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
- ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error,json=applicationError" json:"application_error,omitempty"`
- JavaException []byte `protobuf:"bytes,4,opt,name=java_exception,json=javaException" json:"java_exception,omitempty"`
- RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error,json=rpcError" json:"rpc_error,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
+ Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
+ ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error,json=applicationError" json:"application_error,omitempty"`
+ JavaException []byte `protobuf:"bytes,4,opt,name=java_exception,json=javaException" json:"java_exception,omitempty"`
+ RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error,json=rpcError" json:"rpc_error,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Response) Reset() { *m = Response{} }
-func (m *Response) String() string { return proto.CompactTextString(m) }
-func (*Response) ProtoMessage() {}
-func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+func (m *Response) Reset() { *m = Response{} }
+func (m *Response) String() string { return proto.CompactTextString(m) }
+func (*Response) ProtoMessage() {}
+func (*Response) Descriptor() ([]byte, []int) {
+ return fileDescriptor_remote_api_1978114ec33a273d, []int{3}
+}
+func (m *Response) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Response.Unmarshal(m, b)
+}
+func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Response.Marshal(b, m, deterministic)
+}
+func (dst *Response) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Response.Merge(dst, src)
+}
+func (m *Response) XXX_Size() int {
+ return xxx_messageInfo_Response.Size(m)
+}
+func (m *Response) XXX_DiscardUnknown() {
+ xxx_messageInfo_Response.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Response proto.InternalMessageInfo
func (m *Response) GetResponse() []byte {
if m != nil {
@@ -245,10 +319,10 @@ func init() {
}
func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/remote_api/remote_api.proto", fileDescriptor0)
+ proto.RegisterFile("google.golang.org/appengine/internal/remote_api/remote_api.proto", fileDescriptor_remote_api_1978114ec33a273d)
}
-var fileDescriptor0 = []byte{
+var fileDescriptor_remote_api_1978114ec33a273d = []byte{
// 531 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x51, 0x6e, 0xd3, 0x40,
0x10, 0x86, 0xb1, 0x9b, 0x34, 0xf1, 0xc4, 0x2d, 0xdb, 0xa5, 0x14, 0x0b, 0x15, 0x29, 0x44, 0x42,
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
index 7c96c9d40..5f727750a 100644
--- a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
@@ -1,17 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
-/*
-Package urlfetch is a generated protocol buffer package.
-
-It is generated from these files:
- google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
-
-It has these top-level messages:
- URLFetchServiceError
- URLFetchRequest
- URLFetchResponse
-*/
package urlfetch
import proto "github.com/golang/protobuf/proto"
@@ -95,7 +84,7 @@ func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
return nil
}
func (URLFetchServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor0, []int{0, 0}
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0, 0}
}
type URLFetchRequest_RequestMethod int32
@@ -143,17 +132,38 @@ func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error {
return nil
}
func (URLFetchRequest_RequestMethod) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor0, []int{1, 0}
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0}
}
type URLFetchServiceError struct {
- XXX_unrecognized []byte `json:"-"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} }
-func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }
-func (*URLFetchServiceError) ProtoMessage() {}
-func (*URLFetchServiceError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} }
+func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }
+func (*URLFetchServiceError) ProtoMessage() {}
+func (*URLFetchServiceError) Descriptor() ([]byte, []int) {
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0}
+}
+func (m *URLFetchServiceError) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_URLFetchServiceError.Unmarshal(m, b)
+}
+func (m *URLFetchServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_URLFetchServiceError.Marshal(b, m, deterministic)
+}
+func (dst *URLFetchServiceError) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_URLFetchServiceError.Merge(dst, src)
+}
+func (m *URLFetchServiceError) XXX_Size() int {
+ return xxx_messageInfo_URLFetchServiceError.Size(m)
+}
+func (m *URLFetchServiceError) XXX_DiscardUnknown() {
+ xxx_messageInfo_URLFetchServiceError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_URLFetchServiceError proto.InternalMessageInfo
type URLFetchRequest struct {
Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"`
@@ -163,13 +173,34 @@ type URLFetchRequest struct {
FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"`
Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"`
MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} }
-func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }
-func (*URLFetchRequest) ProtoMessage() {}
-func (*URLFetchRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} }
+func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest) ProtoMessage() {}
+func (*URLFetchRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1}
+}
+func (m *URLFetchRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_URLFetchRequest.Unmarshal(m, b)
+}
+func (m *URLFetchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_URLFetchRequest.Marshal(b, m, deterministic)
+}
+func (dst *URLFetchRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_URLFetchRequest.Merge(dst, src)
+}
+func (m *URLFetchRequest) XXX_Size() int {
+ return xxx_messageInfo_URLFetchRequest.Size(m)
+}
+func (m *URLFetchRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_URLFetchRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_URLFetchRequest proto.InternalMessageInfo
const Default_URLFetchRequest_FollowRedirects bool = true
const Default_URLFetchRequest_MustValidateServerCertificate bool = true
@@ -224,15 +255,36 @@ func (m *URLFetchRequest) GetMustValidateServerCertificate() bool {
}
type URLFetchRequest_Header struct {
- Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
- Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
+ Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} }
-func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }
-func (*URLFetchRequest_Header) ProtoMessage() {}
-func (*URLFetchRequest_Header) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} }
+func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} }
+func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest_Header) ProtoMessage() {}
+func (*URLFetchRequest_Header) Descriptor() ([]byte, []int) {
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0}
+}
+func (m *URLFetchRequest_Header) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_URLFetchRequest_Header.Unmarshal(m, b)
+}
+func (m *URLFetchRequest_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_URLFetchRequest_Header.Marshal(b, m, deterministic)
+}
+func (dst *URLFetchRequest_Header) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_URLFetchRequest_Header.Merge(dst, src)
+}
+func (m *URLFetchRequest_Header) XXX_Size() int {
+ return xxx_messageInfo_URLFetchRequest_Header.Size(m)
+}
+func (m *URLFetchRequest_Header) XXX_DiscardUnknown() {
+ xxx_messageInfo_URLFetchRequest_Header.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_URLFetchRequest_Header proto.InternalMessageInfo
func (m *URLFetchRequest_Header) GetKey() string {
if m != nil && m.Key != nil {
@@ -259,13 +311,34 @@ type URLFetchResponse struct {
ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"`
ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"`
ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} }
-func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) }
-func (*URLFetchResponse) ProtoMessage() {}
-func (*URLFetchResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} }
+func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse) ProtoMessage() {}
+func (*URLFetchResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2}
+}
+func (m *URLFetchResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_URLFetchResponse.Unmarshal(m, b)
+}
+func (m *URLFetchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_URLFetchResponse.Marshal(b, m, deterministic)
+}
+func (dst *URLFetchResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_URLFetchResponse.Merge(dst, src)
+}
+func (m *URLFetchResponse) XXX_Size() int {
+ return xxx_messageInfo_URLFetchResponse.Size(m)
+}
+func (m *URLFetchResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_URLFetchResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_URLFetchResponse proto.InternalMessageInfo
const Default_URLFetchResponse_ContentWasTruncated bool = false
const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0
@@ -343,15 +416,36 @@ func (m *URLFetchResponse) GetApiBytesReceived() int64 {
}
type URLFetchResponse_Header struct {
- Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
- Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
+ Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} }
-func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }
-func (*URLFetchResponse_Header) ProtoMessage() {}
-func (*URLFetchResponse_Header) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
+func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} }
+func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse_Header) ProtoMessage() {}
+func (*URLFetchResponse_Header) Descriptor() ([]byte, []int) {
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2, 0}
+}
+func (m *URLFetchResponse_Header) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_URLFetchResponse_Header.Unmarshal(m, b)
+}
+func (m *URLFetchResponse_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_URLFetchResponse_Header.Marshal(b, m, deterministic)
+}
+func (dst *URLFetchResponse_Header) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_URLFetchResponse_Header.Merge(dst, src)
+}
+func (m *URLFetchResponse_Header) XXX_Size() int {
+ return xxx_messageInfo_URLFetchResponse_Header.Size(m)
+}
+func (m *URLFetchResponse_Header) XXX_DiscardUnknown() {
+ xxx_messageInfo_URLFetchResponse_Header.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_URLFetchResponse_Header proto.InternalMessageInfo
func (m *URLFetchResponse_Header) GetKey() string {
if m != nil && m.Key != nil {
@@ -376,10 +470,10 @@ func init() {
}
func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto", fileDescriptor0)
+ proto.RegisterFile("google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto", fileDescriptor_urlfetch_service_b245a7065f33bced)
}
-var fileDescriptor0 = []byte{
+var fileDescriptor_urlfetch_service_b245a7065f33bced = []byte{
// 770 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xe3, 0x54,
0x10, 0xc6, 0x76, 0x7e, 0xa7, 0x5d, 0x7a, 0x76, 0xb6, 0x45, 0x66, 0xb5, 0xa0, 0x10, 0x09, 0x29,
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
index 9521b50e9..53d57f67a 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
@@ -1,12 +1,24 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/annotations.proto
-package annotations // import "google.golang.org/genproto/googleapis/api/annotations"
+/*
+Package annotations is a generated protocol buffer package.
+
+It is generated from these files:
+ google/api/annotations.proto
+ google/api/http.proto
+
+It has these top-level messages:
+ Http
+ HttpRule
+ CustomHttpPattern
+*/
+package annotations
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
-import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
+import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -20,7 +32,7 @@ var _ = math.Inf
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
var E_Http = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MethodOptions)(nil),
+ ExtendedType: (*google_protobuf.MethodOptions)(nil),
ExtensionType: (*HttpRule)(nil),
Field: 72295728,
Name: "google.api.http",
@@ -32,11 +44,9 @@ func init() {
proto.RegisterExtension(E_Http)
}
-func init() {
- proto.RegisterFile("google/api/annotations.proto", fileDescriptor_annotations_55609bb51d80951d)
-}
+func init() { proto.RegisterFile("google/api/annotations.proto", fileDescriptor0) }
-var fileDescriptor_annotations_55609bb51d80951d = []byte{
+var fileDescriptor0 = []byte{
// 208 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc,
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
index 1a8a27b65..f91c60462 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/http.proto
-package annotations // import "google.golang.org/genproto/googleapis/api/annotations"
+package annotations
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
@@ -12,55 +12,20 @@ var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-// Defines the HTTP configuration for an API service. It contains a list of
+// Defines the HTTP configuration for a service. It contains a list of
// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
// to one or more HTTP REST API methods.
type Http struct {
// A list of HTTP configuration rules that apply to individual API methods.
//
// **NOTE:** All service configuration rules follow "last one wins" order.
- Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"`
- // When set to true, URL path parmeters will be fully URI-decoded except in
- // cases of single segment matches in reserved expansion, where "%2F" will be
- // left encoded.
- //
- // The default behavior is to not decode RFC 6570 reserved characters in multi
- // segment matches.
- FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"`
}
-func (m *Http) Reset() { *m = Http{} }
-func (m *Http) String() string { return proto.CompactTextString(m) }
-func (*Http) ProtoMessage() {}
-func (*Http) Descriptor() ([]byte, []int) {
- return fileDescriptor_http_e457621dddd7365b, []int{0}
-}
-func (m *Http) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Http.Unmarshal(m, b)
-}
-func (m *Http) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Http.Marshal(b, m, deterministic)
-}
-func (dst *Http) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Http.Merge(dst, src)
-}
-func (m *Http) XXX_Size() int {
- return xxx_messageInfo_Http.Size(m)
-}
-func (m *Http) XXX_DiscardUnknown() {
- xxx_messageInfo_Http.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Http proto.InternalMessageInfo
+func (m *Http) Reset() { *m = Http{} }
+func (m *Http) String() string { return proto.CompactTextString(m) }
+func (*Http) ProtoMessage() {}
+func (*Http) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
func (m *Http) GetRules() []*HttpRule {
if m != nil {
@@ -69,19 +34,12 @@ func (m *Http) GetRules() []*HttpRule {
return nil
}
-func (m *Http) GetFullyDecodeReservedExpansion() bool {
- if m != nil {
- return m.FullyDecodeReservedExpansion
- }
- return false
-}
-
// `HttpRule` defines the mapping of an RPC method to one or more HTTP
-// REST API methods. The mapping specifies how different portions of the RPC
-// request message are mapped to URL path, URL query parameters, and
-// HTTP request body. The mapping is typically specified as an
-// `google.api.http` annotation on the RPC method,
-// see "google/api/annotations.proto" for details.
+// REST APIs. The mapping determines what portions of the request
+// message are populated from the path, query parameters, or body of
+// the HTTP request. The mapping is typically specified as an
+// `google.api.http` annotation, see "google/api/annotations.proto"
+// for details.
//
// The mapping consists of a field specifying the path template and
// method kind. The path template can refer to fields in the request
@@ -129,11 +87,6 @@ func (m *Http) GetFullyDecodeReservedExpansion() bool {
// parameters. Assume the following definition of the request message:
//
//
-// service Messaging {
-// rpc GetMessage(GetMessageRequest) returns (Message) {
-// option (google.api.http).get = "/v1/messages/{message_id}";
-// }
-// }
// message GetMessageRequest {
// message SubMessage {
// string subfield = 1;
@@ -246,7 +199,7 @@ func (m *Http) GetFullyDecodeReservedExpansion() bool {
// to the request message are as follows:
//
// 1. The `body` field specifies either `*` or a field path, or is
-// omitted. If omitted, it indicates there is no HTTP request body.
+// omitted. If omitted, it assumes there is no HTTP body.
// 2. Leaf fields (recursive expansion of nested messages in the
// request) can be classified into three types:
// (a) Matched in the URL template.
@@ -265,39 +218,33 @@ func (m *Http) GetFullyDecodeReservedExpansion() bool {
// FieldPath = IDENT { "." IDENT } ;
// Verb = ":" LITERAL ;
//
-// The syntax `*` matches a single path segment. The syntax `**` matches zero
-// or more path segments, which must be the last part of the path except the
-// `Verb`. The syntax `LITERAL` matches literal text in the path.
+// The syntax `*` matches a single path segment. It follows the semantics of
+// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
+// Expansion.
//
-// The syntax `Variable` matches part of the URL path as specified by its
-// template. A variable template must not contain other variables. If a variable
+// The syntax `**` matches zero or more path segments. It follows the semantics
+// of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.3 Reserved
+// Expansion. NOTE: it must be the last segment in the path except the Verb.
+//
+// The syntax `LITERAL` matches literal text in the URL path.
+//
+// The syntax `Variable` matches the entire path as specified by its template;
+// this nested template must not contain further variables. If a variable
// matches a single path segment, its template may be omitted, e.g. `{var}`
// is equivalent to `{var=*}`.
//
-// If a variable contains exactly one path segment, such as `"{var}"` or
-// `"{var=*}"`, when such a variable is expanded into a URL path, all characters
-// except `[-_.~0-9a-zA-Z]` are percent-encoded. Such variables show up in the
-// Discovery Document as `{var}`.
-//
-// If a variable contains one or more path segments, such as `"{var=foo/*}"`
-// or `"{var=**}"`, when such a variable is expanded into a URL path, all
-// characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. Such variables
-// show up in the Discovery Document as `{+var}`.
-//
-// NOTE: While the single segment variable matches the semantics of
-// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2
-// Simple String Expansion, the multi segment variable **does not** match
-// RFC 6570 Reserved Expansion. The reason is that the Reserved Expansion
-// does not expand special characters like `?` and `#`, which would lead
-// to invalid URLs.
-//
// NOTE: the field paths in variables and in the `body` must not refer to
// repeated fields or map fields.
+//
+// Use CustomHttpPattern to specify any HTTP method that is not included in the
+// `pattern` field, such as HEAD, or "*" to leave the HTTP method unspecified for
+// a given URL path rule. The wild-card rule is useful for services that provide
+// content to Web (HTML) clients.
type HttpRule struct {
// Selects methods to which this rule applies.
//
// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
- Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
+ Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
// Determines the URL pattern is matched by this rules. This pattern can be
// used with any of the {get|put|post|delete|patch} methods. A custom method
// can be defined using the 'custom' field.
@@ -314,89 +261,46 @@ type HttpRule struct {
// `*` for mapping all fields not captured by the path pattern to the HTTP
// body. NOTE: the referred field must not be a repeated field and must be
// present at the top-level of request message type.
- Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"`
- // Optional. The name of the response field whose value is mapped to the HTTP
- // body of response. Other response fields are ignored. When
- // not set, the response message will be used as HTTP body of response.
- ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"`
+ Body string `protobuf:"bytes,7,opt,name=body" json:"body,omitempty"`
// Additional HTTP bindings for the selector. Nested bindings must
// not contain an `additional_bindings` field themselves (that is,
// the nesting may only be one level deep).
- AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings" json:"additional_bindings,omitempty"`
}
-func (m *HttpRule) Reset() { *m = HttpRule{} }
-func (m *HttpRule) String() string { return proto.CompactTextString(m) }
-func (*HttpRule) ProtoMessage() {}
-func (*HttpRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_http_e457621dddd7365b, []int{1}
-}
-func (m *HttpRule) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_HttpRule.Unmarshal(m, b)
-}
-func (m *HttpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_HttpRule.Marshal(b, m, deterministic)
-}
-func (dst *HttpRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HttpRule.Merge(dst, src)
-}
-func (m *HttpRule) XXX_Size() int {
- return xxx_messageInfo_HttpRule.Size(m)
-}
-func (m *HttpRule) XXX_DiscardUnknown() {
- xxx_messageInfo_HttpRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HttpRule proto.InternalMessageInfo
-
-func (m *HttpRule) GetSelector() string {
- if m != nil {
- return m.Selector
- }
- return ""
-}
+func (m *HttpRule) Reset() { *m = HttpRule{} }
+func (m *HttpRule) String() string { return proto.CompactTextString(m) }
+func (*HttpRule) ProtoMessage() {}
+func (*HttpRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
type isHttpRule_Pattern interface {
isHttpRule_Pattern()
}
type HttpRule_Get struct {
- Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"`
+ Get string `protobuf:"bytes,2,opt,name=get,oneof"`
}
-
type HttpRule_Put struct {
- Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"`
+ Put string `protobuf:"bytes,3,opt,name=put,oneof"`
}
-
type HttpRule_Post struct {
- Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"`
+ Post string `protobuf:"bytes,4,opt,name=post,oneof"`
}
-
type HttpRule_Delete struct {
- Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"`
+ Delete string `protobuf:"bytes,5,opt,name=delete,oneof"`
}
-
type HttpRule_Patch struct {
- Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"`
+ Patch string `protobuf:"bytes,6,opt,name=patch,oneof"`
}
-
type HttpRule_Custom struct {
- Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"`
+ Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,oneof"`
}
-func (*HttpRule_Get) isHttpRule_Pattern() {}
-
-func (*HttpRule_Put) isHttpRule_Pattern() {}
-
-func (*HttpRule_Post) isHttpRule_Pattern() {}
-
+func (*HttpRule_Get) isHttpRule_Pattern() {}
+func (*HttpRule_Put) isHttpRule_Pattern() {}
+func (*HttpRule_Post) isHttpRule_Pattern() {}
func (*HttpRule_Delete) isHttpRule_Pattern() {}
-
-func (*HttpRule_Patch) isHttpRule_Pattern() {}
-
+func (*HttpRule_Patch) isHttpRule_Pattern() {}
func (*HttpRule_Custom) isHttpRule_Pattern() {}
func (m *HttpRule) GetPattern() isHttpRule_Pattern {
@@ -406,6 +310,13 @@ func (m *HttpRule) GetPattern() isHttpRule_Pattern {
return nil
}
+func (m *HttpRule) GetSelector() string {
+ if m != nil {
+ return m.Selector
+ }
+ return ""
+}
+
func (m *HttpRule) GetGet() string {
if x, ok := m.GetPattern().(*HttpRule_Get); ok {
return x.Get
@@ -455,13 +366,6 @@ func (m *HttpRule) GetBody() string {
return ""
}
-func (m *HttpRule) GetResponseBody() string {
- if m != nil {
- return m.ResponseBody
- }
- return ""
-}
-
func (m *HttpRule) GetAdditionalBindings() []*HttpRule {
if m != nil {
return m.AdditionalBindings
@@ -568,28 +472,28 @@ func _HttpRule_OneofSizer(msg proto.Message) (n int) {
// pattern
switch x := m.Pattern.(type) {
case *HttpRule_Get:
- n += 1 // tag and wire
+ n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.Get)))
n += len(x.Get)
case *HttpRule_Put:
- n += 1 // tag and wire
+ n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.Put)))
n += len(x.Put)
case *HttpRule_Post:
- n += 1 // tag and wire
+ n += proto.SizeVarint(4<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.Post)))
n += len(x.Post)
case *HttpRule_Delete:
- n += 1 // tag and wire
+ n += proto.SizeVarint(5<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.Delete)))
n += len(x.Delete)
case *HttpRule_Patch:
- n += 1 // tag and wire
+ n += proto.SizeVarint(6<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.Patch)))
n += len(x.Patch)
case *HttpRule_Custom:
s := proto.Size(x.Custom)
- n += 1 // tag and wire
+ n += proto.SizeVarint(8<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
@@ -602,37 +506,15 @@ func _HttpRule_OneofSizer(msg proto.Message) (n int) {
// A custom pattern is used for defining custom HTTP verb.
type CustomHttpPattern struct {
// The name of this custom HTTP verb.
- Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
+ Kind string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"`
// The path matched by this custom verb.
- Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Path string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"`
}
-func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} }
-func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) }
-func (*CustomHttpPattern) ProtoMessage() {}
-func (*CustomHttpPattern) Descriptor() ([]byte, []int) {
- return fileDescriptor_http_e457621dddd7365b, []int{2}
-}
-func (m *CustomHttpPattern) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CustomHttpPattern.Unmarshal(m, b)
-}
-func (m *CustomHttpPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CustomHttpPattern.Marshal(b, m, deterministic)
-}
-func (dst *CustomHttpPattern) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CustomHttpPattern.Merge(dst, src)
-}
-func (m *CustomHttpPattern) XXX_Size() int {
- return xxx_messageInfo_CustomHttpPattern.Size(m)
-}
-func (m *CustomHttpPattern) XXX_DiscardUnknown() {
- xxx_messageInfo_CustomHttpPattern.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CustomHttpPattern proto.InternalMessageInfo
+func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} }
+func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) }
+func (*CustomHttpPattern) ProtoMessage() {}
+func (*CustomHttpPattern) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }
func (m *CustomHttpPattern) GetKind() string {
if m != nil {
@@ -654,35 +536,31 @@ func init() {
proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern")
}
-func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor_http_e457621dddd7365b) }
+func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor1) }
-var fileDescriptor_http_e457621dddd7365b = []byte{
- // 419 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x8e, 0xd3, 0x30,
- 0x10, 0x86, 0x49, 0x9b, 0x76, 0xdb, 0xe9, 0x82, 0x84, 0x59, 0x90, 0x85, 0x40, 0x54, 0xe5, 0x52,
- 0x71, 0x48, 0xa5, 0xe5, 0xc0, 0x61, 0x4f, 0x1b, 0xa8, 0x58, 0x6e, 0x55, 0x8e, 0x5c, 0x22, 0x37,
- 0x1e, 0x52, 0x83, 0xd7, 0xb6, 0xe2, 0x09, 0xa2, 0xaf, 0xc3, 0x63, 0xf1, 0x24, 0x1c, 0x91, 0x9d,
- 0x84, 0x56, 0x42, 0xe2, 0x36, 0xf3, 0xff, 0x9f, 0xa7, 0x7f, 0x27, 0x03, 0x4f, 0x6b, 0x6b, 0x6b,
- 0x8d, 0x1b, 0xe1, 0xd4, 0xe6, 0x40, 0xe4, 0x32, 0xd7, 0x58, 0xb2, 0x0c, 0x3a, 0x39, 0x13, 0x4e,
- 0xad, 0x8e, 0x90, 0xde, 0x11, 0x39, 0xf6, 0x06, 0x26, 0x4d, 0xab, 0xd1, 0xf3, 0x64, 0x39, 0x5e,
- 0x2f, 0xae, 0xaf, 0xb2, 0x13, 0x93, 0x05, 0xa0, 0x68, 0x35, 0x16, 0x1d, 0xc2, 0xb6, 0xf0, 0xea,
- 0x4b, 0xab, 0xf5, 0xb1, 0x94, 0x58, 0x59, 0x89, 0x65, 0x83, 0x1e, 0x9b, 0xef, 0x28, 0x4b, 0xfc,
- 0xe1, 0x84, 0xf1, 0xca, 0x1a, 0x3e, 0x5a, 0x26, 0xeb, 0x59, 0xf1, 0x22, 0x62, 0x1f, 0x22, 0x55,
- 0xf4, 0xd0, 0x76, 0x60, 0x56, 0xbf, 0x46, 0x30, 0x1b, 0x46, 0xb3, 0xe7, 0x30, 0xf3, 0xa8, 0xb1,
- 0x22, 0xdb, 0xf0, 0x64, 0x99, 0xac, 0xe7, 0xc5, 0xdf, 0x9e, 0x31, 0x18, 0xd7, 0x48, 0x71, 0xe6,
- 0xfc, 0xee, 0x41, 0x11, 0x9a, 0xa0, 0xb9, 0x96, 0xf8, 0x78, 0xd0, 0x5c, 0x4b, 0xec, 0x0a, 0x52,
- 0x67, 0x3d, 0xf1, 0xb4, 0x17, 0x63, 0xc7, 0x38, 0x4c, 0x25, 0x6a, 0x24, 0xe4, 0x93, 0x5e, 0xef,
- 0x7b, 0xf6, 0x0c, 0x26, 0x4e, 0x50, 0x75, 0xe0, 0xd3, 0xde, 0xe8, 0x5a, 0xf6, 0x0e, 0xa6, 0x55,
- 0xeb, 0xc9, 0xde, 0xf3, 0xd9, 0x32, 0x59, 0x2f, 0xae, 0x5f, 0x9e, 0x2f, 0xe3, 0x7d, 0x74, 0x42,
- 0xee, 0x9d, 0x20, 0xc2, 0xc6, 0x84, 0x81, 0x1d, 0xce, 0x18, 0xa4, 0x7b, 0x2b, 0x8f, 0xfc, 0x22,
- 0xfe, 0x81, 0x58, 0xb3, 0xd7, 0xf0, 0xb0, 0x41, 0xef, 0xac, 0xf1, 0x58, 0x46, 0xf3, 0x32, 0x9a,
- 0x97, 0x83, 0x98, 0x07, 0x68, 0x0b, 0x4f, 0x84, 0x94, 0x8a, 0x94, 0x35, 0x42, 0x97, 0x7b, 0x65,
- 0xa4, 0x32, 0xb5, 0xe7, 0x8b, 0xff, 0x7c, 0x0b, 0x76, 0x7a, 0x90, 0xf7, 0x7c, 0x3e, 0x87, 0x0b,
- 0xd7, 0x85, 0x5a, 0xdd, 0xc0, 0xe3, 0x7f, 0x92, 0x86, 0x7c, 0xdf, 0x94, 0x91, 0xfd, 0x82, 0x63,
- 0x1d, 0x34, 0x27, 0xe8, 0xd0, 0x6d, 0xb7, 0x88, 0x75, 0xfe, 0x15, 0x1e, 0x55, 0xf6, 0xfe, 0xec,
- 0x67, 0xf3, 0x79, 0x1c, 0x13, 0xae, 0x67, 0x97, 0x7c, 0xbe, 0xed, 0x8d, 0xda, 0x6a, 0x61, 0xea,
- 0xcc, 0x36, 0xf5, 0xa6, 0x46, 0x13, 0x6f, 0x6b, 0xd3, 0x59, 0xc2, 0x29, 0x1f, 0xaf, 0x4e, 0x18,
- 0x63, 0x49, 0x84, 0x98, 0xfe, 0xe6, 0xac, 0xfe, 0x9d, 0x24, 0x3f, 0x47, 0xe9, 0xc7, 0xdb, 0xdd,
- 0xa7, 0xfd, 0x34, 0xbe, 0x7b, 0xfb, 0x27, 0x00, 0x00, 0xff, 0xff, 0xae, 0xde, 0xa1, 0xd0, 0xac,
- 0x02, 0x00, 0x00,
+var fileDescriptor1 = []byte{
+ // 359 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcf, 0x6a, 0xe3, 0x30,
+ 0x10, 0xc6, 0xd7, 0x89, 0xe3, 0x24, 0x13, 0x58, 0x58, 0x6d, 0x76, 0x11, 0x85, 0x42, 0xc8, 0x29,
+ 0xf4, 0x60, 0x43, 0x7a, 0xe8, 0x21, 0xa7, 0xb8, 0x94, 0xa6, 0xb7, 0xe0, 0x63, 0x2f, 0x45, 0xb1,
+ 0x85, 0xa2, 0xd6, 0x91, 0x84, 0x3d, 0x3e, 0xf4, 0x75, 0xfa, 0x0e, 0x7d, 0xb7, 0x1e, 0x8b, 0xfe,
+ 0xa4, 0x09, 0x14, 0x7a, 0x9b, 0xef, 0x37, 0x9f, 0x34, 0xa3, 0x19, 0xc1, 0x3f, 0xa1, 0xb5, 0xa8,
+ 0x79, 0xc6, 0x8c, 0xcc, 0xf6, 0x88, 0x26, 0x35, 0x8d, 0x46, 0x4d, 0xc0, 0xe3, 0x94, 0x19, 0x39,
+ 0x5f, 0x42, 0xbc, 0x41, 0x34, 0xe4, 0x0a, 0x06, 0x4d, 0x57, 0xf3, 0x96, 0x46, 0xb3, 0xfe, 0x62,
+ 0xb2, 0x9c, 0xa6, 0x27, 0x4f, 0x6a, 0x0d, 0x45, 0x57, 0xf3, 0xc2, 0x5b, 0xe6, 0xef, 0x3d, 0x18,
+ 0x1d, 0x19, 0xb9, 0x80, 0x51, 0xcb, 0x6b, 0x5e, 0xa2, 0x6e, 0x68, 0x34, 0x8b, 0x16, 0xe3, 0xe2,
+ 0x4b, 0x13, 0x02, 0x7d, 0xc1, 0x91, 0xf6, 0x2c, 0xde, 0xfc, 0x2a, 0xac, 0xb0, 0xcc, 0x74, 0x48,
+ 0xfb, 0x47, 0x66, 0x3a, 0x24, 0x53, 0x88, 0x8d, 0x6e, 0x91, 0xc6, 0x01, 0x3a, 0x45, 0x28, 0x24,
+ 0x15, 0xaf, 0x39, 0x72, 0x3a, 0x08, 0x3c, 0x68, 0xf2, 0x1f, 0x06, 0x86, 0x61, 0xb9, 0xa7, 0x49,
+ 0x48, 0x78, 0x49, 0x6e, 0x20, 0x29, 0xbb, 0x16, 0xf5, 0x81, 0x8e, 0x66, 0xd1, 0x62, 0xb2, 0xbc,
+ 0x3c, 0x7f, 0xc5, 0xad, 0xcb, 0xd8, 0xbe, 0xb7, 0x0c, 0x91, 0x37, 0xca, 0x5e, 0xe8, 0xed, 0x84,
+ 0x40, 0xbc, 0xd3, 0xd5, 0x2b, 0x1d, 0xba, 0x07, 0xb8, 0x98, 0xdc, 0xc1, 0x5f, 0x56, 0x55, 0x12,
+ 0xa5, 0x56, 0xac, 0x7e, 0xda, 0x49, 0x55, 0x49, 0x25, 0x5a, 0x3a, 0xf9, 0x61, 0x3e, 0xe4, 0x74,
+ 0x20, 0x0f, 0xfe, 0x7c, 0x0c, 0x43, 0xe3, 0xeb, 0xcd, 0x57, 0xf0, 0xe7, 0x5b, 0x13, 0xb6, 0xf4,
+ 0x8b, 0x54, 0x55, 0x98, 0x9d, 0x8b, 0x2d, 0x33, 0x0c, 0xf7, 0x7e, 0x70, 0x85, 0x8b, 0xf3, 0x67,
+ 0xf8, 0x5d, 0xea, 0xc3, 0x59, 0xd9, 0x7c, 0xec, 0xae, 0xb1, 0x1b, 0xdd, 0x46, 0x8f, 0xeb, 0x90,
+ 0x10, 0xba, 0x66, 0x4a, 0xa4, 0xba, 0x11, 0x99, 0xe0, 0xca, 0xed, 0x3b, 0xf3, 0x29, 0x66, 0x64,
+ 0xeb, 0x7e, 0x02, 0x53, 0x4a, 0x23, 0xb3, 0x6d, 0xb6, 0xab, 0xb3, 0xf8, 0x23, 0x8a, 0xde, 0x7a,
+ 0xf1, 0xfd, 0x7a, 0xfb, 0xb0, 0x4b, 0xdc, 0xb9, 0xeb, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x68,
+ 0x15, 0x60, 0x5b, 0x40, 0x02, 0x00, 0x00,
}
diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go
index dfc87966b..2f481a396 100644
--- a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go
@@ -1,7 +1,24 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/iam/v1/iam_policy.proto
-package iam // import "google.golang.org/genproto/googleapis/iam/v1"
+/*
+Package iam is a generated protocol buffer package.
+
+It is generated from these files:
+ google/iam/v1/iam_policy.proto
+ google/iam/v1/policy.proto
+
+It has these top-level messages:
+ SetIamPolicyRequest
+ GetIamPolicyRequest
+ TestIamPermissionsRequest
+ TestIamPermissionsResponse
+ Policy
+ Binding
+ PolicyDelta
+ BindingDelta
+*/
+package iam
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
@@ -29,40 +46,18 @@ type SetIamPolicyRequest struct {
// REQUIRED: The resource for which the policy is being specified.
// `resource` is usually specified as a path. For example, a Project
// resource is specified as `projects/{project}`.
- Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
+ Resource string `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"`
// REQUIRED: The complete policy to be applied to the `resource`. The size of
// the policy is limited to a few 10s of KB. An empty policy is a
// valid policy but certain Cloud Platform services (such as Projects)
// might reject them.
- Policy *Policy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Policy *Policy `protobuf:"bytes,2,opt,name=policy" json:"policy,omitempty"`
}
-func (m *SetIamPolicyRequest) Reset() { *m = SetIamPolicyRequest{} }
-func (m *SetIamPolicyRequest) String() string { return proto.CompactTextString(m) }
-func (*SetIamPolicyRequest) ProtoMessage() {}
-func (*SetIamPolicyRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_iam_policy_58547b5cf2e9d67a, []int{0}
-}
-func (m *SetIamPolicyRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SetIamPolicyRequest.Unmarshal(m, b)
-}
-func (m *SetIamPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SetIamPolicyRequest.Marshal(b, m, deterministic)
-}
-func (dst *SetIamPolicyRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SetIamPolicyRequest.Merge(dst, src)
-}
-func (m *SetIamPolicyRequest) XXX_Size() int {
- return xxx_messageInfo_SetIamPolicyRequest.Size(m)
-}
-func (m *SetIamPolicyRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_SetIamPolicyRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SetIamPolicyRequest proto.InternalMessageInfo
+func (m *SetIamPolicyRequest) Reset() { *m = SetIamPolicyRequest{} }
+func (m *SetIamPolicyRequest) String() string { return proto.CompactTextString(m) }
+func (*SetIamPolicyRequest) ProtoMessage() {}
+func (*SetIamPolicyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *SetIamPolicyRequest) GetResource() string {
if m != nil {
@@ -83,35 +78,13 @@ type GetIamPolicyRequest struct {
// REQUIRED: The resource for which the policy is being requested.
// `resource` is usually specified as a path. For example, a Project
// resource is specified as `projects/{project}`.
- Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Resource string `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"`
}
-func (m *GetIamPolicyRequest) Reset() { *m = GetIamPolicyRequest{} }
-func (m *GetIamPolicyRequest) String() string { return proto.CompactTextString(m) }
-func (*GetIamPolicyRequest) ProtoMessage() {}
-func (*GetIamPolicyRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_iam_policy_58547b5cf2e9d67a, []int{1}
-}
-func (m *GetIamPolicyRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetIamPolicyRequest.Unmarshal(m, b)
-}
-func (m *GetIamPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetIamPolicyRequest.Marshal(b, m, deterministic)
-}
-func (dst *GetIamPolicyRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetIamPolicyRequest.Merge(dst, src)
-}
-func (m *GetIamPolicyRequest) XXX_Size() int {
- return xxx_messageInfo_GetIamPolicyRequest.Size(m)
-}
-func (m *GetIamPolicyRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_GetIamPolicyRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetIamPolicyRequest proto.InternalMessageInfo
+func (m *GetIamPolicyRequest) Reset() { *m = GetIamPolicyRequest{} }
+func (m *GetIamPolicyRequest) String() string { return proto.CompactTextString(m) }
+func (*GetIamPolicyRequest) ProtoMessage() {}
+func (*GetIamPolicyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *GetIamPolicyRequest) GetResource() string {
if m != nil {
@@ -125,40 +98,18 @@ type TestIamPermissionsRequest struct {
// REQUIRED: The resource for which the policy detail is being requested.
// `resource` is usually specified as a path. For example, a Project
// resource is specified as `projects/{project}`.
- Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
+ Resource string `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"`
// The set of permissions to check for the `resource`. Permissions with
// wildcards (such as '*' or 'storage.*') are not allowed. For more
// information see
// [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).
- Permissions []string `protobuf:"bytes,2,rep,name=permissions,proto3" json:"permissions,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Permissions []string `protobuf:"bytes,2,rep,name=permissions" json:"permissions,omitempty"`
}
-func (m *TestIamPermissionsRequest) Reset() { *m = TestIamPermissionsRequest{} }
-func (m *TestIamPermissionsRequest) String() string { return proto.CompactTextString(m) }
-func (*TestIamPermissionsRequest) ProtoMessage() {}
-func (*TestIamPermissionsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_iam_policy_58547b5cf2e9d67a, []int{2}
-}
-func (m *TestIamPermissionsRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TestIamPermissionsRequest.Unmarshal(m, b)
-}
-func (m *TestIamPermissionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TestIamPermissionsRequest.Marshal(b, m, deterministic)
-}
-func (dst *TestIamPermissionsRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TestIamPermissionsRequest.Merge(dst, src)
-}
-func (m *TestIamPermissionsRequest) XXX_Size() int {
- return xxx_messageInfo_TestIamPermissionsRequest.Size(m)
-}
-func (m *TestIamPermissionsRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_TestIamPermissionsRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TestIamPermissionsRequest proto.InternalMessageInfo
+func (m *TestIamPermissionsRequest) Reset() { *m = TestIamPermissionsRequest{} }
+func (m *TestIamPermissionsRequest) String() string { return proto.CompactTextString(m) }
+func (*TestIamPermissionsRequest) ProtoMessage() {}
+func (*TestIamPermissionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *TestIamPermissionsRequest) GetResource() string {
if m != nil {
@@ -178,35 +129,13 @@ func (m *TestIamPermissionsRequest) GetPermissions() []string {
type TestIamPermissionsResponse struct {
// A subset of `TestPermissionsRequest.permissions` that the caller is
// allowed.
- Permissions []string `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Permissions []string `protobuf:"bytes,1,rep,name=permissions" json:"permissions,omitempty"`
}
-func (m *TestIamPermissionsResponse) Reset() { *m = TestIamPermissionsResponse{} }
-func (m *TestIamPermissionsResponse) String() string { return proto.CompactTextString(m) }
-func (*TestIamPermissionsResponse) ProtoMessage() {}
-func (*TestIamPermissionsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_iam_policy_58547b5cf2e9d67a, []int{3}
-}
-func (m *TestIamPermissionsResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TestIamPermissionsResponse.Unmarshal(m, b)
-}
-func (m *TestIamPermissionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TestIamPermissionsResponse.Marshal(b, m, deterministic)
-}
-func (dst *TestIamPermissionsResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TestIamPermissionsResponse.Merge(dst, src)
-}
-func (m *TestIamPermissionsResponse) XXX_Size() int {
- return xxx_messageInfo_TestIamPermissionsResponse.Size(m)
-}
-func (m *TestIamPermissionsResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_TestIamPermissionsResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TestIamPermissionsResponse proto.InternalMessageInfo
+func (m *TestIamPermissionsResponse) Reset() { *m = TestIamPermissionsResponse{} }
+func (m *TestIamPermissionsResponse) String() string { return proto.CompactTextString(m) }
+func (*TestIamPermissionsResponse) ProtoMessage() {}
+func (*TestIamPermissionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *TestIamPermissionsResponse) GetPermissions() []string {
if m != nil {
@@ -230,9 +159,8 @@ var _ grpc.ClientConn
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
-// IAMPolicyClient is the client API for IAMPolicy service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+// Client API for IAMPolicy service
+
type IAMPolicyClient interface {
// Sets the access control policy on the specified resource. Replaces any
// existing policy.
@@ -257,7 +185,7 @@ func NewIAMPolicyClient(cc *grpc.ClientConn) IAMPolicyClient {
func (c *iAMPolicyClient) SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) {
out := new(Policy)
- err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/SetIamPolicy", in, out, opts...)
+ err := grpc.Invoke(ctx, "/google.iam.v1.IAMPolicy/SetIamPolicy", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
@@ -266,7 +194,7 @@ func (c *iAMPolicyClient) SetIamPolicy(ctx context.Context, in *SetIamPolicyRequ
func (c *iAMPolicyClient) GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) {
out := new(Policy)
- err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/GetIamPolicy", in, out, opts...)
+ err := grpc.Invoke(ctx, "/google.iam.v1.IAMPolicy/GetIamPolicy", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
@@ -275,14 +203,15 @@ func (c *iAMPolicyClient) GetIamPolicy(ctx context.Context, in *GetIamPolicyRequ
func (c *iAMPolicyClient) TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) {
out := new(TestIamPermissionsResponse)
- err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/TestIamPermissions", in, out, opts...)
+ err := grpc.Invoke(ctx, "/google.iam.v1.IAMPolicy/TestIamPermissions", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-// IAMPolicyServer is the server API for IAMPolicy service.
+// Server API for IAMPolicy service
+
type IAMPolicyServer interface {
// Sets the access control policy on the specified resource. Replaces any
// existing policy.
@@ -376,36 +305,33 @@ var _IAMPolicy_serviceDesc = grpc.ServiceDesc{
Metadata: "google/iam/v1/iam_policy.proto",
}
-func init() {
- proto.RegisterFile("google/iam/v1/iam_policy.proto", fileDescriptor_iam_policy_58547b5cf2e9d67a)
-}
+func init() { proto.RegisterFile("google/iam/v1/iam_policy.proto", fileDescriptor0) }
-var fileDescriptor_iam_policy_58547b5cf2e9d67a = []byte{
- // 411 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
- 0xcf, 0x49, 0xd5, 0xcf, 0x4c, 0xcc, 0xd5, 0x2f, 0x33, 0x04, 0x51, 0xf1, 0x05, 0xf9, 0x39, 0x99,
- 0xc9, 0x95, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0xbc, 0x10, 0x79, 0xbd, 0xcc, 0xc4, 0x5c,
- 0xbd, 0x32, 0x43, 0x29, 0x19, 0xa8, 0xf2, 0xc4, 0x82, 0x4c, 0xfd, 0xc4, 0xbc, 0xbc, 0xfc, 0x92,
- 0xc4, 0x92, 0xcc, 0xfc, 0xbc, 0x62, 0x88, 0x62, 0x29, 0x29, 0x54, 0xc3, 0x90, 0x0d, 0x52, 0x4a,
- 0xe0, 0x12, 0x0e, 0x4e, 0x2d, 0xf1, 0x4c, 0xcc, 0x0d, 0x00, 0x8b, 0x06, 0xa5, 0x16, 0x96, 0xa6,
- 0x16, 0x97, 0x08, 0x49, 0x71, 0x71, 0x14, 0xa5, 0x16, 0xe7, 0x97, 0x16, 0x25, 0xa7, 0x4a, 0x30,
- 0x2a, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xf9, 0x42, 0xba, 0x5c, 0x6c, 0x10, 0x23, 0x24, 0x98, 0x14,
- 0x18, 0x35, 0xb8, 0x8d, 0x44, 0xf5, 0x50, 0x1c, 0xa3, 0x07, 0x35, 0x09, 0xaa, 0x48, 0xc9, 0x90,
- 0x4b, 0xd8, 0x9d, 0x34, 0x1b, 0x94, 0x22, 0xb9, 0x24, 0x43, 0x52, 0x8b, 0xc1, 0x7a, 0x52, 0x8b,
- 0x72, 0x33, 0x8b, 0x8b, 0x41, 0x9e, 0x21, 0xc6, 0x69, 0x0a, 0x5c, 0xdc, 0x05, 0x08, 0x1d, 0x12,
- 0x4c, 0x0a, 0xcc, 0x1a, 0x9c, 0x41, 0xc8, 0x42, 0x4a, 0x76, 0x5c, 0x52, 0xd8, 0x8c, 0x2e, 0x2e,
- 0xc8, 0xcf, 0x2b, 0xc6, 0xd0, 0xcf, 0x88, 0xa1, 0xdf, 0x68, 0x0a, 0x33, 0x17, 0xa7, 0xa7, 0xa3,
- 0x2f, 0xc4, 0x2f, 0x42, 0x25, 0x5c, 0x3c, 0xc8, 0xa1, 0x27, 0xa4, 0x84, 0x16, 0x14, 0x58, 0x82,
- 0x56, 0x0a, 0x7b, 0x70, 0x29, 0x69, 0x36, 0x5d, 0x7e, 0x32, 0x99, 0x49, 0x59, 0x49, 0x0e, 0x14,
- 0x45, 0xd5, 0x30, 0x1f, 0xd9, 0x6a, 0x69, 0xd5, 0x5a, 0x15, 0x23, 0x99, 0x62, 0xc5, 0xa8, 0x05,
- 0xb2, 0xd5, 0x1d, 0x9f, 0xad, 0xee, 0x54, 0xb1, 0x35, 0x1d, 0xcd, 0xd6, 0x59, 0x8c, 0x5c, 0x42,
- 0x98, 0x41, 0x27, 0xa4, 0x81, 0x66, 0x30, 0xce, 0x88, 0x93, 0xd2, 0x24, 0x42, 0x25, 0x24, 0x1e,
- 0x94, 0xf4, 0xc1, 0xce, 0xd2, 0x54, 0x52, 0xc1, 0x74, 0x56, 0x09, 0x86, 0x2e, 0x2b, 0x46, 0x2d,
- 0xa7, 0x36, 0x46, 0x2e, 0xc1, 0xe4, 0xfc, 0x5c, 0x54, 0x1b, 0x9c, 0xf8, 0xe0, 0x1e, 0x08, 0x00,
- 0x25, 0xf6, 0x00, 0xc6, 0x28, 0x03, 0xa8, 0x82, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc,
- 0xa2, 0x74, 0xfd, 0xf4, 0xd4, 0x3c, 0x70, 0x56, 0xd0, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0x43,
- 0x73, 0x8a, 0x75, 0x66, 0x62, 0xee, 0x0f, 0x46, 0xc6, 0x55, 0x4c, 0xc2, 0xee, 0x10, 0x5d, 0xce,
- 0x39, 0xf9, 0xa5, 0x29, 0x7a, 0x9e, 0x89, 0xb9, 0x7a, 0x61, 0x86, 0xa7, 0x60, 0xa2, 0x31, 0x60,
- 0xd1, 0x18, 0xcf, 0xc4, 0xdc, 0x98, 0x30, 0xc3, 0x24, 0x36, 0xb0, 0x59, 0xc6, 0x80, 0x00, 0x00,
- 0x00, 0xff, 0xff, 0xea, 0x62, 0x8f, 0x22, 0xc1, 0x03, 0x00, 0x00,
+var fileDescriptor0 = []byte{
+ // 396 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x53, 0xcf, 0x4a, 0xe3, 0x40,
+ 0x18, 0x67, 0x52, 0x28, 0xdb, 0xe9, 0xee, 0xc2, 0xa6, 0x2c, 0xd4, 0x20, 0x25, 0x8c, 0x1e, 0xd2,
+ 0x80, 0x13, 0x53, 0x6f, 0x15, 0x05, 0xeb, 0x21, 0xf4, 0x20, 0x94, 0x2a, 0x82, 0x5e, 0x74, 0xac,
+ 0x43, 0x18, 0x48, 0x32, 0x31, 0x33, 0x2d, 0x88, 0x78, 0xf1, 0x15, 0xf4, 0xe4, 0x23, 0xf8, 0x3a,
+ 0xbe, 0x82, 0x0f, 0xe1, 0x51, 0x92, 0x89, 0x35, 0x6d, 0xaa, 0x54, 0xf0, 0x54, 0x3a, 0xf3, 0xfb,
+ 0xf7, 0xfd, 0xbe, 0x0c, 0x6c, 0xf9, 0x9c, 0xfb, 0x01, 0x75, 0x18, 0x09, 0x9d, 0x89, 0x9b, 0xfe,
+ 0x9c, 0xc5, 0x3c, 0x60, 0xa3, 0x6b, 0x1c, 0x27, 0x5c, 0x72, 0xfd, 0x8f, 0xba, 0xc7, 0x8c, 0x84,
+ 0x78, 0xe2, 0x1a, 0xab, 0x39, 0x9c, 0xc4, 0xcc, 0x21, 0x51, 0xc4, 0x25, 0x91, 0x8c, 0x47, 0x42,
+ 0x81, 0x0d, 0x63, 0x56, 0xac, 0x28, 0x84, 0xce, 0x61, 0xe3, 0x90, 0xca, 0x3e, 0x09, 0x07, 0xd9,
+ 0xe9, 0x90, 0x5e, 0x8d, 0xa9, 0x90, 0xba, 0x01, 0x7f, 0x25, 0x54, 0xf0, 0x71, 0x32, 0xa2, 0x4d,
+ 0x60, 0x02, 0xab, 0x36, 0x9c, 0xfe, 0xd7, 0x37, 0x60, 0x55, 0x49, 0x34, 0x35, 0x13, 0x58, 0xf5,
+ 0xce, 0x7f, 0x3c, 0x13, 0x06, 0xe7, 0x4a, 0x39, 0x08, 0xb9, 0xb0, 0xe1, 0x7d, 0xcf, 0x01, 0x9d,
+ 0xc0, 0x95, 0x23, 0x2a, 0x32, 0x0e, 0x4d, 0x42, 0x26, 0x44, 0x3a, 0xcc, 0x32, 0xd1, 0x4c, 0x58,
+ 0x8f, 0x3f, 0x18, 0x4d, 0xcd, 0xac, 0x58, 0xb5, 0x61, 0xf1, 0x08, 0xed, 0x42, 0x63, 0x91, 0xb4,
+ 0x88, 0x79, 0x24, 0x4a, 0x7c, 0x50, 0xe2, 0x77, 0x1e, 0x2a, 0xb0, 0xd6, 0xdf, 0x3b, 0x50, 0xb3,
+ 0xe8, 0x12, 0xfe, 0x2e, 0xb6, 0xa7, 0xa3, 0xb9, 0x2a, 0x16, 0x54, 0x6b, 0x2c, 0xae, 0x0b, 0xb5,
+ 0xef, 0x9e, 0x5f, 0xee, 0xb5, 0x35, 0xd4, 0x4a, 0x57, 0x74, 0xf3, 0x3e, 0xd1, 0x8e, 0x6d, 0xdf,
+ 0x76, 0x45, 0x41, 0xa5, 0x0b, 0xec, 0xd4, 0xd5, 0xfb, 0xca, 0xd5, 0xfb, 0x11, 0x57, 0x7f, 0xce,
+ 0xf5, 0x11, 0x40, 0xbd, 0x5c, 0x9d, 0x6e, 0xcd, 0x09, 0x7f, 0xba, 0x38, 0xa3, 0xbd, 0x04, 0x52,
+ 0xed, 0x01, 0x39, 0x59, 0xac, 0x36, 0x5a, 0x2f, 0xc7, 0x92, 0x25, 0x56, 0x17, 0xd8, 0xbd, 0x18,
+ 0xfe, 0x1b, 0xf1, 0x70, 0xd6, 0xa0, 0xf7, 0x77, 0x9a, 0x7f, 0x90, 0x7e, 0xeb, 0x03, 0x70, 0xba,
+ 0x99, 0x03, 0x7c, 0x1e, 0x90, 0xc8, 0xc7, 0x3c, 0xf1, 0x1d, 0x9f, 0x46, 0xd9, 0x4b, 0x70, 0xd4,
+ 0x15, 0x89, 0x99, 0xc8, 0x1f, 0xca, 0x36, 0x23, 0xe1, 0x2b, 0x00, 0x4f, 0x5a, 0xc3, 0x53, 0xac,
+ 0xfd, 0x80, 0x8f, 0x2f, 0x71, 0x9f, 0x84, 0xf8, 0xd8, 0xbd, 0xa8, 0x66, 0xac, 0xad, 0xb7, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0x6c, 0x3a, 0x2b, 0x4d, 0xaa, 0x03, 0x00, 0x00,
}
diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go
index 99dd75f26..a22ae91be 100644
--- a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/iam/v1/policy.proto
-package iam // import "google.golang.org/genproto/googleapis/iam/v1"
+package iam
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
@@ -13,12 +13,6 @@ var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
// The type of action performed on a Binding in a policy.
type BindingDelta_Action int32
@@ -45,9 +39,7 @@ var BindingDelta_Action_value = map[string]int32{
func (x BindingDelta_Action) String() string {
return proto.EnumName(BindingDelta_Action_name, int32(x))
}
-func (BindingDelta_Action) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_policy_6ba2a3dcbcdd909c, []int{3, 0}
-}
+func (BindingDelta_Action) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{3, 0} }
// Defines an Identity and Access Management (IAM) policy. It is used to
// specify access control policies for Cloud Platform resources.
@@ -82,11 +74,11 @@ func (BindingDelta_Action) EnumDescriptor() ([]byte, []int) {
// [IAM developer's guide](https://cloud.google.com/iam).
type Policy struct {
// Version of the `Policy`. The default version is 0.
- Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
+ Version int32 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"`
// Associates a list of `members` to a `role`.
// Multiple `bindings` must not be specified for the same `role`.
// `bindings` with no members will result in an error.
- Bindings []*Binding `protobuf:"bytes,4,rep,name=bindings,proto3" json:"bindings,omitempty"`
+ Bindings []*Binding `protobuf:"bytes,4,rep,name=bindings" json:"bindings,omitempty"`
// `etag` is used for optimistic concurrency control as a way to help
// prevent simultaneous updates of a policy from overwriting each other.
// It is strongly suggested that systems make use of the `etag` in the
@@ -97,35 +89,13 @@ type Policy struct {
//
// If no `etag` is provided in the call to `setIamPolicy`, then the existing
// policy is overwritten blindly.
- Etag []byte `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Etag []byte `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"`
}
-func (m *Policy) Reset() { *m = Policy{} }
-func (m *Policy) String() string { return proto.CompactTextString(m) }
-func (*Policy) ProtoMessage() {}
-func (*Policy) Descriptor() ([]byte, []int) {
- return fileDescriptor_policy_6ba2a3dcbcdd909c, []int{0}
-}
-func (m *Policy) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Policy.Unmarshal(m, b)
-}
-func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Policy.Marshal(b, m, deterministic)
-}
-func (dst *Policy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Policy.Merge(dst, src)
-}
-func (m *Policy) XXX_Size() int {
- return xxx_messageInfo_Policy.Size(m)
-}
-func (m *Policy) XXX_DiscardUnknown() {
- xxx_messageInfo_Policy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Policy proto.InternalMessageInfo
+func (m *Policy) Reset() { *m = Policy{} }
+func (m *Policy) String() string { return proto.CompactTextString(m) }
+func (*Policy) ProtoMessage() {}
+func (*Policy) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
func (m *Policy) GetVersion() int32 {
if m != nil {
@@ -153,7 +123,7 @@ type Binding struct {
// Role that is assigned to `members`.
// For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
// Required
- Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+ Role string `protobuf:"bytes,1,opt,name=role" json:"role,omitempty"`
// Specifies the identities requesting access for a Cloud Platform resource.
// `members` can have the following values:
//
@@ -177,35 +147,13 @@ type Binding struct {
// users of that domain. For example, `google.com` or `example.com`.
//
//
- Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Members []string `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"`
}
-func (m *Binding) Reset() { *m = Binding{} }
-func (m *Binding) String() string { return proto.CompactTextString(m) }
-func (*Binding) ProtoMessage() {}
-func (*Binding) Descriptor() ([]byte, []int) {
- return fileDescriptor_policy_6ba2a3dcbcdd909c, []int{1}
-}
-func (m *Binding) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Binding.Unmarshal(m, b)
-}
-func (m *Binding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Binding.Marshal(b, m, deterministic)
-}
-func (dst *Binding) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Binding.Merge(dst, src)
-}
-func (m *Binding) XXX_Size() int {
- return xxx_messageInfo_Binding.Size(m)
-}
-func (m *Binding) XXX_DiscardUnknown() {
- xxx_messageInfo_Binding.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Binding proto.InternalMessageInfo
+func (m *Binding) Reset() { *m = Binding{} }
+func (m *Binding) String() string { return proto.CompactTextString(m) }
+func (*Binding) ProtoMessage() {}
+func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
func (m *Binding) GetRole() string {
if m != nil {
@@ -224,35 +172,13 @@ func (m *Binding) GetMembers() []string {
// The difference delta between two policies.
type PolicyDelta struct {
// The delta for Bindings between two policies.
- BindingDeltas []*BindingDelta `protobuf:"bytes,1,rep,name=binding_deltas,json=bindingDeltas,proto3" json:"binding_deltas,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ BindingDeltas []*BindingDelta `protobuf:"bytes,1,rep,name=binding_deltas,json=bindingDeltas" json:"binding_deltas,omitempty"`
}
-func (m *PolicyDelta) Reset() { *m = PolicyDelta{} }
-func (m *PolicyDelta) String() string { return proto.CompactTextString(m) }
-func (*PolicyDelta) ProtoMessage() {}
-func (*PolicyDelta) Descriptor() ([]byte, []int) {
- return fileDescriptor_policy_6ba2a3dcbcdd909c, []int{2}
-}
-func (m *PolicyDelta) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PolicyDelta.Unmarshal(m, b)
-}
-func (m *PolicyDelta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PolicyDelta.Marshal(b, m, deterministic)
-}
-func (dst *PolicyDelta) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PolicyDelta.Merge(dst, src)
-}
-func (m *PolicyDelta) XXX_Size() int {
- return xxx_messageInfo_PolicyDelta.Size(m)
-}
-func (m *PolicyDelta) XXX_DiscardUnknown() {
- xxx_messageInfo_PolicyDelta.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PolicyDelta proto.InternalMessageInfo
+func (m *PolicyDelta) Reset() { *m = PolicyDelta{} }
+func (m *PolicyDelta) String() string { return proto.CompactTextString(m) }
+func (*PolicyDelta) ProtoMessage() {}
+func (*PolicyDelta) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }
func (m *PolicyDelta) GetBindingDeltas() []*BindingDelta {
if m != nil {
@@ -266,43 +192,21 @@ func (m *PolicyDelta) GetBindingDeltas() []*BindingDelta {
type BindingDelta struct {
// The action that was performed on a Binding.
// Required
- Action BindingDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=google.iam.v1.BindingDelta_Action" json:"action,omitempty"`
+ Action BindingDelta_Action `protobuf:"varint,1,opt,name=action,enum=google.iam.v1.BindingDelta_Action" json:"action,omitempty"`
// Role that is assigned to `members`.
// For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
// Required
- Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
+ Role string `protobuf:"bytes,2,opt,name=role" json:"role,omitempty"`
// A single identity requesting access for a Cloud Platform resource.
// Follows the same format of Binding.members.
// Required
- Member string `protobuf:"bytes,3,opt,name=member,proto3" json:"member,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Member string `protobuf:"bytes,3,opt,name=member" json:"member,omitempty"`
}
-func (m *BindingDelta) Reset() { *m = BindingDelta{} }
-func (m *BindingDelta) String() string { return proto.CompactTextString(m) }
-func (*BindingDelta) ProtoMessage() {}
-func (*BindingDelta) Descriptor() ([]byte, []int) {
- return fileDescriptor_policy_6ba2a3dcbcdd909c, []int{3}
-}
-func (m *BindingDelta) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_BindingDelta.Unmarshal(m, b)
-}
-func (m *BindingDelta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_BindingDelta.Marshal(b, m, deterministic)
-}
-func (dst *BindingDelta) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BindingDelta.Merge(dst, src)
-}
-func (m *BindingDelta) XXX_Size() int {
- return xxx_messageInfo_BindingDelta.Size(m)
-}
-func (m *BindingDelta) XXX_DiscardUnknown() {
- xxx_messageInfo_BindingDelta.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BindingDelta proto.InternalMessageInfo
+func (m *BindingDelta) Reset() { *m = BindingDelta{} }
+func (m *BindingDelta) String() string { return proto.CompactTextString(m) }
+func (*BindingDelta) ProtoMessage() {}
+func (*BindingDelta) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} }
func (m *BindingDelta) GetAction() BindingDelta_Action {
if m != nil {
@@ -333,34 +237,33 @@ func init() {
proto.RegisterEnum("google.iam.v1.BindingDelta_Action", BindingDelta_Action_name, BindingDelta_Action_value)
}
-func init() { proto.RegisterFile("google/iam/v1/policy.proto", fileDescriptor_policy_6ba2a3dcbcdd909c) }
+func init() { proto.RegisterFile("google/iam/v1/policy.proto", fileDescriptor1) }
-var fileDescriptor_policy_6ba2a3dcbcdd909c = []byte{
- // 403 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0x4d, 0xab, 0x13, 0x31,
- 0x14, 0x35, 0xed, 0x73, 0x6a, 0xef, 0xfb, 0xa0, 0x46, 0x28, 0xc3, 0xd3, 0x45, 0x99, 0x55, 0x57,
- 0x19, 0x5b, 0x11, 0x41, 0x57, 0xfd, 0x18, 0x65, 0x16, 0xbe, 0x37, 0x46, 0xed, 0x42, 0x0a, 0x8f,
- 0x4c, 0x1b, 0x42, 0x64, 0x92, 0x0c, 0x33, 0x63, 0xc1, 0xb5, 0xff, 0x46, 0xf0, 0x8f, 0xf8, 0x8b,
- 0x5c, 0xca, 0x24, 0x99, 0x47, 0x0b, 0xe2, 0x2e, 0xe7, 0x9e, 0x73, 0x72, 0xcf, 0xcd, 0x0d, 0x5c,
- 0x0b, 0x63, 0x44, 0xc1, 0x63, 0xc9, 0x54, 0x7c, 0x98, 0xc5, 0xa5, 0x29, 0xe4, 0xee, 0x3b, 0x29,
- 0x2b, 0xd3, 0x18, 0x7c, 0xe9, 0x38, 0x22, 0x99, 0x22, 0x87, 0xd9, 0xf5, 0x33, 0x2f, 0x65, 0xa5,
- 0x8c, 0x99, 0xd6, 0xa6, 0x61, 0x8d, 0x34, 0xba, 0x76, 0xe2, 0xe8, 0x2b, 0x04, 0x99, 0x35, 0xe3,
- 0x10, 0x06, 0x07, 0x5e, 0xd5, 0xd2, 0xe8, 0x10, 0x4d, 0xd0, 0xf4, 0x21, 0xed, 0x20, 0x9e, 0xc3,
- 0xa3, 0x5c, 0xea, 0xbd, 0xd4, 0xa2, 0x0e, 0xcf, 0x26, 0xfd, 0xe9, 0xf9, 0x7c, 0x4c, 0x4e, 0x7a,
- 0x90, 0xa5, 0xa3, 0xe9, 0xbd, 0x0e, 0x63, 0x38, 0xe3, 0x0d, 0x13, 0x61, 0x7f, 0x82, 0xa6, 0x17,
- 0xd4, 0x9e, 0xa3, 0x57, 0x30, 0xf0, 0xc2, 0x96, 0xae, 0x4c, 0xc1, 0x6d, 0xa7, 0x21, 0xb5, 0xe7,
- 0x36, 0x80, 0xe2, 0x2a, 0xe7, 0x55, 0x1d, 0xf6, 0x26, 0xfd, 0xe9, 0x90, 0x76, 0x30, 0xfa, 0x00,
- 0xe7, 0x2e, 0xe4, 0x9a, 0x17, 0x0d, 0xc3, 0x4b, 0xb8, 0xf2, 0x7d, 0xee, 0xf6, 0x6d, 0xa1, 0x0e,
- 0x91, 0x4d, 0xf5, 0xf4, 0xdf, 0xa9, 0xac, 0x89, 0x5e, 0xe6, 0x47, 0xa8, 0x8e, 0x7e, 0x21, 0xb8,
- 0x38, 0xe6, 0xf1, 0x6b, 0x08, 0xd8, 0xae, 0xe9, 0xa6, 0xbf, 0x9a, 0x47, 0xff, 0xb9, 0x8c, 0x2c,
- 0xac, 0x92, 0x7a, 0xc7, 0xfd, 0x34, 0xbd, 0xa3, 0x69, 0xc6, 0x10, 0xb8, 0xf8, 0xf6, 0x09, 0x86,
- 0xd4, 0xa3, 0xe8, 0x25, 0x04, 0xce, 0x8d, 0xc7, 0x80, 0x17, 0xab, 0x4f, 0xe9, 0xed, 0xcd, 0xdd,
- 0xe7, 0x9b, 0x8f, 0x59, 0xb2, 0x4a, 0xdf, 0xa6, 0xc9, 0x7a, 0xf4, 0x00, 0x0f, 0xa0, 0xbf, 0x58,
- 0xaf, 0x47, 0x08, 0x03, 0x04, 0x34, 0x79, 0x7f, 0xbb, 0x49, 0x46, 0xbd, 0xe5, 0x0f, 0x04, 0x8f,
- 0x77, 0x46, 0x9d, 0x86, 0x5a, 0xfa, 0x67, 0xc9, 0xda, 0x55, 0x66, 0xe8, 0xcb, 0x73, 0xcf, 0x0a,
- 0x53, 0x30, 0x2d, 0x88, 0xa9, 0x44, 0x2c, 0xb8, 0xb6, 0x8b, 0x8e, 0x1d, 0xc5, 0x4a, 0x59, 0xfb,
- 0x4f, 0xf3, 0x46, 0x32, 0xf5, 0x07, 0xa1, 0x9f, 0xbd, 0x27, 0xef, 0x9c, 0x6b, 0x55, 0x98, 0x6f,
- 0x7b, 0x92, 0x32, 0x45, 0x36, 0xb3, 0xdf, 0x5d, 0x75, 0x6b, 0xab, 0xdb, 0x94, 0xa9, 0xed, 0x66,
- 0x96, 0x07, 0xf6, 0xae, 0x17, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x18, 0xca, 0xaa, 0x7f,
+var fileDescriptor1 = []byte{
+ // 387 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0x4d, 0x8f, 0xd3, 0x30,
+ 0x10, 0xc5, 0xed, 0x92, 0xd2, 0xd9, 0x0f, 0x15, 0x23, 0x55, 0xd1, 0xc2, 0xa1, 0xca, 0x29, 0x27,
+ 0x87, 0x16, 0x21, 0x24, 0x38, 0x35, 0x4d, 0x40, 0x39, 0xb0, 0x1b, 0x0c, 0xec, 0x81, 0xcb, 0xca,
+ 0x69, 0x2d, 0xcb, 0x28, 0xb6, 0xa3, 0x24, 0x54, 0xe2, 0x2f, 0x21, 0xf1, 0xff, 0x38, 0xa2, 0xd8,
+ 0xee, 0xaa, 0x95, 0x10, 0xb7, 0x79, 0x79, 0xef, 0x65, 0xde, 0xcc, 0x18, 0xae, 0x85, 0x31, 0xa2,
+ 0xe6, 0x89, 0x64, 0x2a, 0xd9, 0x2f, 0x93, 0xc6, 0xd4, 0x72, 0xfb, 0x93, 0x34, 0xad, 0xe9, 0x0d,
+ 0xbe, 0x74, 0x1c, 0x91, 0x4c, 0x91, 0xfd, 0xf2, 0xfa, 0x85, 0x97, 0xb2, 0x46, 0x26, 0x4c, 0x6b,
+ 0xd3, 0xb3, 0x5e, 0x1a, 0xdd, 0x39, 0x71, 0xf4, 0x1d, 0x82, 0xd2, 0x9a, 0x71, 0x08, 0x93, 0x3d,
+ 0x6f, 0x3b, 0x69, 0x74, 0x88, 0x16, 0x28, 0x7e, 0x4c, 0x0f, 0x10, 0xaf, 0xe0, 0x49, 0x25, 0xf5,
+ 0x4e, 0x6a, 0xd1, 0x85, 0x67, 0x8b, 0x71, 0x7c, 0xbe, 0x9a, 0x93, 0x93, 0x1e, 0x24, 0x75, 0x34,
+ 0x7d, 0xd0, 0x61, 0x0c, 0x67, 0xbc, 0x67, 0x22, 0x1c, 0x2f, 0x50, 0x7c, 0x41, 0x6d, 0x1d, 0xbd,
+ 0x81, 0x89, 0x17, 0x0e, 0x74, 0x6b, 0x6a, 0x6e, 0x3b, 0x4d, 0xa9, 0xad, 0x87, 0x00, 0x8a, 0xab,
+ 0x8a, 0xb7, 0x5d, 0x38, 0x5a, 0x8c, 0xe3, 0x29, 0x3d, 0xc0, 0xe8, 0x13, 0x9c, 0xbb, 0x90, 0x19,
+ 0xaf, 0x7b, 0x86, 0x53, 0xb8, 0xf2, 0x7d, 0xee, 0x77, 0xc3, 0x87, 0x2e, 0x44, 0x36, 0xd5, 0xf3,
+ 0x7f, 0xa7, 0xb2, 0x26, 0x7a, 0x59, 0x1d, 0xa1, 0x2e, 0xfa, 0x8d, 0xe0, 0xe2, 0x98, 0xc7, 0x6f,
+ 0x21, 0x60, 0xdb, 0xfe, 0x30, 0xfd, 0xd5, 0x2a, 0xfa, 0xcf, 0xcf, 0xc8, 0xda, 0x2a, 0xa9, 0x77,
+ 0x3c, 0x4c, 0x33, 0x3a, 0x9a, 0x66, 0x0e, 0x81, 0x8b, 0x6f, 0x57, 0x30, 0xa5, 0x1e, 0x45, 0xaf,
+ 0x21, 0x70, 0x6e, 0x3c, 0x07, 0xbc, 0xde, 0x7c, 0x29, 0x6e, 0x6f, 0xee, 0xbf, 0xde, 0x7c, 0x2e,
+ 0xf3, 0x4d, 0xf1, 0xbe, 0xc8, 0xb3, 0xd9, 0x23, 0x3c, 0x81, 0xf1, 0x3a, 0xcb, 0x66, 0x08, 0x03,
+ 0x04, 0x34, 0xff, 0x78, 0x7b, 0x97, 0xcf, 0x46, 0xa9, 0x82, 0xa7, 0x5b, 0xa3, 0x4e, 0x33, 0xa5,
+ 0x7e, 0x2b, 0xe5, 0x70, 0xc9, 0x12, 0x7d, 0x7b, 0xe9, 0x59, 0x61, 0x6a, 0xa6, 0x05, 0x31, 0xad,
+ 0x48, 0x04, 0xd7, 0xf6, 0xce, 0x89, 0xa3, 0x58, 0x23, 0x3b, 0xff, 0x66, 0xde, 0x49, 0xa6, 0xfe,
+ 0x20, 0xf4, 0x6b, 0xf4, 0xec, 0x83, 0x73, 0x6d, 0x6a, 0xf3, 0x63, 0x47, 0x0a, 0xa6, 0xc8, 0xdd,
+ 0xb2, 0x0a, 0xac, 0xeb, 0xd5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x4a, 0x85, 0x10, 0x68,
0x02, 0x00, 0x00,
}
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
index 7bfe37a3d..8867ae781 100644
--- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
@@ -1,12 +1,21 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/rpc/status.proto
-package status // import "google.golang.org/genproto/googleapis/rpc/status"
+/*
+Package status is a generated protocol buffer package.
+
+It is generated from these files:
+ google/rpc/status.proto
+
+It has these top-level messages:
+ Status
+*/
+package status
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
-import any "github.com/golang/protobuf/ptypes/any"
+import google_protobuf "github.com/golang/protobuf/ptypes/any"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -73,42 +82,20 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// be used directly after any stripping needed for security/privacy reasons.
type Status struct {
// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
- Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
+ Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"`
// A developer-facing error message, which should be in English. Any
// user-facing error message should be localized and sent in the
// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
- Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"`
// A list of messages that carry the error details. There is a common set of
// message types for APIs to use.
- Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Details []*google_protobuf.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"`
}
-func (m *Status) Reset() { *m = Status{} }
-func (m *Status) String() string { return proto.CompactTextString(m) }
-func (*Status) ProtoMessage() {}
-func (*Status) Descriptor() ([]byte, []int) {
- return fileDescriptor_status_c6e4de62dcdf2edf, []int{0}
-}
-func (m *Status) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Status.Unmarshal(m, b)
-}
-func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Status.Marshal(b, m, deterministic)
-}
-func (dst *Status) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Status.Merge(dst, src)
-}
-func (m *Status) XXX_Size() int {
- return xxx_messageInfo_Status.Size(m)
-}
-func (m *Status) XXX_DiscardUnknown() {
- xxx_messageInfo_Status.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Status proto.InternalMessageInfo
+func (m *Status) Reset() { *m = Status{} }
+func (m *Status) String() string { return proto.CompactTextString(m) }
+func (*Status) ProtoMessage() {}
+func (*Status) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Status) GetCode() int32 {
if m != nil {
@@ -124,7 +111,7 @@ func (m *Status) GetMessage() string {
return ""
}
-func (m *Status) GetDetails() []*any.Any {
+func (m *Status) GetDetails() []*google_protobuf.Any {
if m != nil {
return m.Details
}
@@ -135,9 +122,9 @@ func init() {
proto.RegisterType((*Status)(nil), "google.rpc.Status")
}
-func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_status_c6e4de62dcdf2edf) }
+func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor0) }
-var fileDescriptor_status_c6e4de62dcdf2edf = []byte{
+var fileDescriptor0 = []byte{
// 209 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28,
diff --git a/vendor/modules.txt b/vendor/modules.txt
index cc54c96fa..df72822fc 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -5,8 +5,6 @@ cloud.google.com/go/internal
cloud.google.com/go/internal/optional
cloud.google.com/go/internal/version
cloud.google.com/go/compute/metadata
-# contrib.go.opencensus.io/exporter/stackdriver v0.6.0
-contrib.go.opencensus.io/exporter/stackdriver/propagation
# github.com/Azure/azure-sdk-for-go v10.3.0-beta+incompatible
github.com/Azure/azure-sdk-for-go/arm/storage
github.com/Azure/azure-sdk-for-go/storage
@@ -449,7 +447,7 @@ github.com/vmihailenco/msgpack/codes
github.com/xanzy/ssh-agent
# github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557
github.com/xlab/treeprint
-# github.com/zclconf/go-cty v0.0.0-20180907002636-07dee8a1cfd4
+# github.com/zclconf/go-cty v0.0.0-20180925180032-d9b87d891d0b
github.com/zclconf/go-cty/cty
github.com/zclconf/go-cty/cty/gocty
github.com/zclconf/go-cty/cty/convert
@@ -458,21 +456,7 @@ github.com/zclconf/go-cty/cty/function
github.com/zclconf/go-cty/cty/function/stdlib
github.com/zclconf/go-cty/cty/msgpack
github.com/zclconf/go-cty/cty/set
-# go.opencensus.io v0.17.0
-go.opencensus.io/plugin/ochttp
-go.opencensus.io/trace
-go.opencensus.io/trace/propagation
-go.opencensus.io/plugin/ochttp/propagation/b3
-go.opencensus.io/stats
-go.opencensus.io/stats/view
-go.opencensus.io/tag
-go.opencensus.io/internal
-go.opencensus.io/trace/internal
-go.opencensus.io/trace/tracestate
-go.opencensus.io/stats/internal
-go.opencensus.io/internal/tagencoding
-go.opencensus.io
-# golang.org/x/crypto v0.0.0-20180816225734-aabede6cba87
+# golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b
golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/knownhosts
@@ -492,7 +476,7 @@ golang.org/x/crypto/internal/subtle
golang.org/x/crypto/md4
golang.org/x/crypto/cast5
golang.org/x/crypto/openpgp/elgamal
-# golang.org/x/net v0.0.0-20180906233101-161cd47e91fd
+# golang.org/x/net v0.0.0-20180925072008-f04abc6bdfa7
golang.org/x/net/context
golang.org/x/net/idna
golang.org/x/net/http2
@@ -504,13 +488,13 @@ golang.org/x/net/http2/hpack
golang.org/x/net/internal/timeseries
golang.org/x/net/html
golang.org/x/net/html/atom
-# golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
+# golang.org/x/oauth2 v0.0.0-20170928010508-bb50c06baba3
golang.org/x/oauth2/jwt
golang.org/x/oauth2
golang.org/x/oauth2/internal
golang.org/x/oauth2/jws
golang.org/x/oauth2/google
-# golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e
+# golang.org/x/sys v0.0.0-20180925112736-b09afc3d579e
golang.org/x/sys/unix
# golang.org/x/text v0.3.0
golang.org/x/text/unicode/norm
@@ -531,7 +515,7 @@ golang.org/x/text/encoding/unicode
golang.org/x/text/internal/tag
golang.org/x/text/internal/utf8internal
golang.org/x/text/runes
-# google.golang.org/api v0.0.0-20180921000521-920bb1beccf7
+# google.golang.org/api v0.0.0-20171005000305-7a7376eff6a5
google.golang.org/api/iterator
google.golang.org/api/option
google.golang.org/api/googleapi
@@ -541,19 +525,19 @@ google.golang.org/api/internal
google.golang.org/api/googleapi/internal/uritemplates
google.golang.org/api/gensupport
google.golang.org/api/googleapi/transport
-# google.golang.org/appengine v1.1.0
+# google.golang.org/appengine v1.2.0
google.golang.org/appengine/urlfetch
-google.golang.org/appengine
google.golang.org/appengine/datastore
google.golang.org/appengine/internal
google.golang.org/appengine/internal/urlfetch
-google.golang.org/appengine/internal/app_identity
-google.golang.org/appengine/internal/modules
+google.golang.org/appengine
google.golang.org/appengine/internal/datastore
google.golang.org/appengine/internal/base
google.golang.org/appengine/internal/log
google.golang.org/appengine/internal/remote_api
-# google.golang.org/genproto v0.0.0-20180831171423-11092d34479b
+google.golang.org/appengine/internal/app_identity
+google.golang.org/appengine/internal/modules
+# google.golang.org/genproto v0.0.0-20171002232614-f676e0f3ac63
google.golang.org/genproto/googleapis/iam/v1
google.golang.org/genproto/googleapis/api/annotations
google.golang.org/genproto/googleapis/rpc/status