Merge pull request #16484 from hashicorp/f-gcloud-backend
Convert gcloud backend
This commit is contained in:
commit
55089e472d
|
@ -14,6 +14,7 @@ import (
|
|||
backendAzure "github.com/hashicorp/terraform/backend/remote-state/azure"
|
||||
backendconsul "github.com/hashicorp/terraform/backend/remote-state/consul"
|
||||
backendetcdv3 "github.com/hashicorp/terraform/backend/remote-state/etcdv3"
|
||||
backendGCS "github.com/hashicorp/terraform/backend/remote-state/gcs"
|
||||
backendinmem "github.com/hashicorp/terraform/backend/remote-state/inmem"
|
||||
backendS3 "github.com/hashicorp/terraform/backend/remote-state/s3"
|
||||
backendSwift "github.com/hashicorp/terraform/backend/remote-state/swift"
|
||||
|
@ -47,6 +48,7 @@ func init() {
|
|||
`Warning: "azure" name is deprecated, please use "azurerm"`),
|
||||
"azurerm": func() backend.Backend { return backendAzure.New() },
|
||||
"etcdv3": func() backend.Backend { return backendetcdv3.New() },
|
||||
"gcs": func() backend.Backend { return backendGCS.New() },
|
||||
}
|
||||
|
||||
// Add the legacy remote backends that haven't yet been convertd to
|
||||
|
|
|
@ -0,0 +1,146 @@
|
|||
// Package gcs implements remote storage of state on Google Cloud Storage (GCS).
|
||||
package gcs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"cloud.google.com/go/storage"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"google.golang.org/api/option"
|
||||
)
|
||||
|
||||
// gcsBackend implements "backend".Backend for GCS.
|
||||
// Input(), Validate() and Configure() are implemented by embedding *schema.Backend.
|
||||
// State(), DeleteState() and States() are implemented explicitly.
|
||||
type gcsBackend struct {
|
||||
*schema.Backend
|
||||
|
||||
storageClient *storage.Client
|
||||
storageContext context.Context
|
||||
|
||||
bucketName string
|
||||
prefix string
|
||||
defaultStateFile string
|
||||
|
||||
projectID string
|
||||
region string
|
||||
}
|
||||
|
||||
func New() backend.Backend {
|
||||
be := &gcsBackend{}
|
||||
be.Backend = &schema.Backend{
|
||||
ConfigureFunc: be.configure,
|
||||
Schema: map[string]*schema.Schema{
|
||||
"bucket": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
Description: "The name of the Google Cloud Storage bucket",
|
||||
},
|
||||
|
||||
"path": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "Path of the default state file",
|
||||
Deprecated: "Use the \"prefix\" option instead",
|
||||
},
|
||||
|
||||
"prefix": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "The directory where state files will be saved inside the bucket",
|
||||
},
|
||||
|
||||
"credentials": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "Google Cloud JSON Account Key",
|
||||
Default: "",
|
||||
},
|
||||
|
||||
"project": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "Google Cloud Project ID",
|
||||
Default: "",
|
||||
},
|
||||
|
||||
"region": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "Region / location in which to create the bucket",
|
||||
Default: "",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return be
|
||||
}
|
||||
|
||||
func (b *gcsBackend) configure(ctx context.Context) error {
|
||||
if b.storageClient != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ctx is a background context with the backend config added.
|
||||
// Since no context is passed to remoteClient.Get(), .Lock(), etc. but
|
||||
// one is required for calling the GCP API, we're holding on to this
|
||||
// context here and re-use it later.
|
||||
b.storageContext = ctx
|
||||
|
||||
data := schema.FromContextBackendConfig(b.storageContext)
|
||||
|
||||
b.bucketName = data.Get("bucket").(string)
|
||||
b.prefix = strings.TrimLeft(data.Get("prefix").(string), "/")
|
||||
|
||||
b.defaultStateFile = strings.TrimLeft(data.Get("path").(string), "/")
|
||||
|
||||
b.projectID = data.Get("project").(string)
|
||||
if id := os.Getenv("GOOGLE_PROJECT"); b.projectID == "" && id != "" {
|
||||
b.projectID = id
|
||||
}
|
||||
b.region = data.Get("region").(string)
|
||||
if r := os.Getenv("GOOGLE_REGION"); b.projectID == "" && r != "" {
|
||||
b.region = r
|
||||
}
|
||||
|
||||
opts := []option.ClientOption{
|
||||
option.WithScopes(storage.ScopeReadWrite),
|
||||
option.WithUserAgent(terraform.UserAgentString()),
|
||||
}
|
||||
if credentialsFile := data.Get("credentials").(string); credentialsFile != "" {
|
||||
opts = append(opts, option.WithCredentialsFile(credentialsFile))
|
||||
} else if credentialsFile := os.Getenv("GOOGLE_CREDENTIALS"); credentialsFile != "" {
|
||||
opts = append(opts, option.WithCredentialsFile(credentialsFile))
|
||||
}
|
||||
|
||||
client, err := storage.NewClient(b.storageContext, opts...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("storage.NewClient() failed: %v", err)
|
||||
}
|
||||
|
||||
b.storageClient = client
|
||||
|
||||
return b.ensureBucketExists()
|
||||
}
|
||||
|
||||
func (b *gcsBackend) ensureBucketExists() error {
|
||||
_, err := b.storageClient.Bucket(b.bucketName).Attrs(b.storageContext)
|
||||
if err != storage.ErrBucketNotExist {
|
||||
return err
|
||||
}
|
||||
|
||||
if b.projectID == "" {
|
||||
return fmt.Errorf("bucket %q does not exist; specify the \"project\" option or create the bucket manually using `gsutil mb gs://%s`", b.bucketName, b.bucketName)
|
||||
}
|
||||
|
||||
attrs := &storage.BucketAttrs{
|
||||
Location: b.region,
|
||||
}
|
||||
|
||||
return b.storageClient.Bucket(b.bucketName).Create(b.storageContext, b.projectID, attrs)
|
||||
}
|
|
@ -0,0 +1,155 @@
|
|||
package gcs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"cloud.google.com/go/storage"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/state"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"google.golang.org/api/iterator"
|
||||
)
|
||||
|
||||
const (
|
||||
stateFileSuffix = ".tfstate"
|
||||
lockFileSuffix = ".tflock"
|
||||
)
|
||||
|
||||
// States returns a list of names for the states found on GCS. The default
|
||||
// state is always returned as the first element in the slice.
|
||||
func (b *gcsBackend) States() ([]string, error) {
|
||||
states := []string{backend.DefaultStateName}
|
||||
|
||||
bucket := b.storageClient.Bucket(b.bucketName)
|
||||
objs := bucket.Objects(b.storageContext, &storage.Query{
|
||||
Delimiter: "/",
|
||||
Prefix: b.prefix,
|
||||
})
|
||||
for {
|
||||
attrs, err := objs.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("querying Cloud Storage failed: %v", err)
|
||||
}
|
||||
|
||||
name := path.Base(attrs.Name)
|
||||
if !strings.HasSuffix(name, stateFileSuffix) {
|
||||
continue
|
||||
}
|
||||
st := strings.TrimSuffix(name, stateFileSuffix)
|
||||
|
||||
if st != backend.DefaultStateName {
|
||||
states = append(states, st)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(states[1:])
|
||||
return states, nil
|
||||
}
|
||||
|
||||
// DeleteState deletes the named state. The "default" state cannot be deleted.
|
||||
func (b *gcsBackend) DeleteState(name string) error {
|
||||
if name == backend.DefaultStateName {
|
||||
return fmt.Errorf("cowardly refusing to delete the %q state", name)
|
||||
}
|
||||
|
||||
c, err := b.client(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.Delete()
|
||||
}
|
||||
|
||||
// client returns a remoteClient for the named state.
|
||||
func (b *gcsBackend) client(name string) (*remoteClient, error) {
|
||||
if name == "" {
|
||||
return nil, fmt.Errorf("%q is not a valid state name", name)
|
||||
}
|
||||
|
||||
return &remoteClient{
|
||||
storageContext: b.storageContext,
|
||||
storageClient: b.storageClient,
|
||||
bucketName: b.bucketName,
|
||||
stateFilePath: b.stateFile(name),
|
||||
lockFilePath: b.lockFile(name),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// State reads and returns the named state from GCS. If the named state does
|
||||
// not yet exist, a new state file is created.
|
||||
func (b *gcsBackend) State(name string) (state.State, error) {
|
||||
c, err := b.client(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
st := &remote.State{Client: c}
|
||||
lockInfo := state.NewLockInfo()
|
||||
lockInfo.Operation = "init"
|
||||
lockID, err := st.Lock(lockInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Local helper function so we can call it multiple places
|
||||
unlock := func(baseErr error) error {
|
||||
if err := st.Unlock(lockID); err != nil {
|
||||
const unlockErrMsg = `%v
|
||||
Additionally, unlocking the state file on Google Cloud Storage failed:
|
||||
|
||||
Error message: %q
|
||||
Lock ID (gen): %v
|
||||
Lock file URL: %v
|
||||
|
||||
You may have to force-unlock this state in order to use it again.
|
||||
The GCloud backend acquires a lock during initialization to ensure
|
||||
the initial state file is created.`
|
||||
return fmt.Errorf(unlockErrMsg, baseErr, err.Error(), lockID, c.lockFileURL())
|
||||
}
|
||||
|
||||
return baseErr
|
||||
}
|
||||
|
||||
// Grab the value
|
||||
if err := st.RefreshState(); err != nil {
|
||||
return nil, unlock(err)
|
||||
}
|
||||
|
||||
// If we have no state, we have to create an empty state
|
||||
if v := st.State(); v == nil {
|
||||
if err := st.WriteState(terraform.NewState()); err != nil {
|
||||
return nil, unlock(err)
|
||||
}
|
||||
if err := st.PersistState(); err != nil {
|
||||
return nil, unlock(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Unlock, the state should now be initialized
|
||||
if err := unlock(nil); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return st, nil
|
||||
}
|
||||
|
||||
func (b *gcsBackend) stateFile(name string) string {
|
||||
if name == backend.DefaultStateName && b.defaultStateFile != "" {
|
||||
return b.defaultStateFile
|
||||
}
|
||||
return path.Join(b.prefix, name+stateFileSuffix)
|
||||
}
|
||||
|
||||
func (b *gcsBackend) lockFile(name string) string {
|
||||
if name == backend.DefaultStateName && b.defaultStateFile != "" {
|
||||
return strings.TrimSuffix(b.defaultStateFile, stateFileSuffix) + lockFileSuffix
|
||||
}
|
||||
return path.Join(b.prefix, name+lockFileSuffix)
|
||||
}
|
|
@ -0,0 +1,205 @@
|
|||
package gcs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
)
|
||||
|
||||
func TestStateFile(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cases := []struct {
|
||||
prefix string
|
||||
defaultStateFile string
|
||||
name string
|
||||
wantStateFile string
|
||||
wantLockFile string
|
||||
}{
|
||||
{"state", "", "default", "state/default.tfstate", "state/default.tflock"},
|
||||
{"state", "", "test", "state/test.tfstate", "state/test.tflock"},
|
||||
{"state", "legacy.tfstate", "default", "legacy.tfstate", "legacy.tflock"},
|
||||
{"state", "legacy.tfstate", "test", "state/test.tfstate", "state/test.tflock"},
|
||||
{"state", "legacy.state", "default", "legacy.state", "legacy.state.tflock"},
|
||||
{"state", "legacy.state", "test", "state/test.tfstate", "state/test.tflock"},
|
||||
}
|
||||
for _, c := range cases {
|
||||
b := &gcsBackend{
|
||||
prefix: c.prefix,
|
||||
defaultStateFile: c.defaultStateFile,
|
||||
}
|
||||
|
||||
if got := b.stateFile(c.name); got != c.wantStateFile {
|
||||
t.Errorf("stateFile(%q) = %q, want %q", c.name, got, c.wantStateFile)
|
||||
}
|
||||
|
||||
if got := b.lockFile(c.name); got != c.wantLockFile {
|
||||
t.Errorf("lockFile(%q) = %q, want %q", c.name, got, c.wantLockFile)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoteClient(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
be := setupBackend(t)
|
||||
defer teardownBackend(t, be)
|
||||
|
||||
ss, err := be.State(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatalf("be.State(%q) = %v", backend.DefaultStateName, err)
|
||||
}
|
||||
|
||||
rs, ok := ss.(*remote.State)
|
||||
if !ok {
|
||||
t.Fatalf("be.State(): got a %T, want a *remote.State", ss)
|
||||
}
|
||||
|
||||
remote.TestClient(t, rs.Client)
|
||||
}
|
||||
|
||||
func TestRemoteLocks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
be := setupBackend(t)
|
||||
defer teardownBackend(t, be)
|
||||
|
||||
remoteClient := func() (remote.Client, error) {
|
||||
ss, err := be.State(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rs, ok := ss.(*remote.State)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("be.State(): got a %T, want a *remote.State", ss)
|
||||
}
|
||||
|
||||
return rs.Client, nil
|
||||
}
|
||||
|
||||
c0, err := remoteClient()
|
||||
if err != nil {
|
||||
t.Fatalf("remoteClient(0) = %v", err)
|
||||
}
|
||||
c1, err := remoteClient()
|
||||
if err != nil {
|
||||
t.Fatalf("remoteClient(1) = %v", err)
|
||||
}
|
||||
|
||||
remote.TestRemoteLocks(t, c0, c1)
|
||||
}
|
||||
|
||||
func TestBackend(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
be0 := setupBackend(t)
|
||||
defer teardownBackend(t, be0)
|
||||
|
||||
be1 := setupBackend(t)
|
||||
|
||||
backend.TestBackend(t, be0, be1)
|
||||
}
|
||||
|
||||
// setupBackend returns a new GCS backend.
|
||||
func setupBackend(t *testing.T) backend.Backend {
|
||||
t.Helper()
|
||||
|
||||
projectID := os.Getenv("GOOGLE_PROJECT")
|
||||
if projectID == "" || os.Getenv("TF_ACC") == "" {
|
||||
t.Skip("This test creates a bucket in GCS and populates it. " +
|
||||
"Since this may incur costs, it will only run if " +
|
||||
"the TF_ACC and GOOGLE_PROJECT environment variables are set.")
|
||||
}
|
||||
|
||||
config := map[string]interface{}{
|
||||
"project": projectID,
|
||||
"bucket": toBucketName(projectID + "-" + t.Name()),
|
||||
"prefix": "",
|
||||
}
|
||||
|
||||
if creds := os.Getenv("GOOGLE_CREDENTIALS"); creds != "" {
|
||||
config["credentials"] = creds
|
||||
t.Logf("using credentials from %q", creds)
|
||||
} else {
|
||||
t.Log("using default credentials; set GOOGLE_CREDENTIALS for custom credentials")
|
||||
}
|
||||
|
||||
return backend.TestBackendConfig(t, New(), config)
|
||||
}
|
||||
|
||||
// teardownBackend deletes all states from be except the default state.
|
||||
func teardownBackend(t *testing.T, be backend.Backend) {
|
||||
t.Helper()
|
||||
|
||||
// Delete all states. The bucket must be empty before it can be deleted.
|
||||
states, err := be.States()
|
||||
if err != nil {
|
||||
t.Fatalf("be.States() = %v; manual clean-up may be required", err)
|
||||
}
|
||||
for _, st := range states {
|
||||
if st == backend.DefaultStateName {
|
||||
continue
|
||||
}
|
||||
if err := be.DeleteState(st); err != nil {
|
||||
t.Fatalf("be.DeleteState(%q) = %v; manual clean-up may be required", st, err)
|
||||
}
|
||||
}
|
||||
|
||||
gcsBE, ok := be.(*gcsBackend)
|
||||
if !ok {
|
||||
t.Fatalf("be is a %T, want a *gcsBackend", be)
|
||||
}
|
||||
ctx := gcsBE.storageContext
|
||||
|
||||
// Delete the default state, which DeleteState() will refuse to do.
|
||||
// It's okay if this fails, not all tests create a default state.
|
||||
if err := gcsBE.storageClient.Bucket(gcsBE.bucketName).Object("default.tfstate").Delete(ctx); err != nil {
|
||||
t.Logf("deleting \"default.tfstate\": %v; manual clean-up may be required", err)
|
||||
}
|
||||
|
||||
// Delete the bucket itself.
|
||||
if err := gcsBE.storageClient.Bucket(gcsBE.bucketName).Delete(ctx); err != nil {
|
||||
t.Fatalf("deleting bucket failed: %v; manual cleanup may be required, though later test runs will happily reuse an existing bucket", err)
|
||||
}
|
||||
}
|
||||
|
||||
// toBucketName returns a copy of in that is suitable for use as a bucket name.
|
||||
// All upper case characters are converted to lower case, other invalid
|
||||
// characters are replaced by '_'.
|
||||
func toBucketName(in string) string {
|
||||
// Bucket names must contain only lowercase letters, numbers, dashes
|
||||
// (-), and underscores (_).
|
||||
isValid := func(r rune) bool {
|
||||
switch {
|
||||
case r >= 'a' && r <= 'z':
|
||||
return true
|
||||
case r >= '0' && r <= '9':
|
||||
return true
|
||||
case r == '-' || r == '_':
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
out := make([]rune, 0, len(in))
|
||||
for _, r := range strings.ToLower(in) {
|
||||
if !isValid(r) {
|
||||
r = '_'
|
||||
}
|
||||
out = append(out, r)
|
||||
}
|
||||
|
||||
// Bucket names must contain 3 to 63 characters.
|
||||
if len(out) > 63 {
|
||||
out = out[:63]
|
||||
}
|
||||
|
||||
return string(out)
|
||||
}
|
|
@ -0,0 +1,168 @@
|
|||
package gcs
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strconv"
|
||||
|
||||
"cloud.google.com/go/storage"
|
||||
multierror "github.com/hashicorp/go-multierror"
|
||||
"github.com/hashicorp/terraform/state"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// remoteClient is used by "state/remote".State to read and write
|
||||
// blobs representing state.
|
||||
// Implements "state/remote".ClientLocker
|
||||
type remoteClient struct {
|
||||
storageContext context.Context
|
||||
storageClient *storage.Client
|
||||
bucketName string
|
||||
stateFilePath string
|
||||
lockFilePath string
|
||||
}
|
||||
|
||||
func (c *remoteClient) Get() (payload *remote.Payload, err error) {
|
||||
stateFileReader, err := c.stateFile().NewReader(c.storageContext)
|
||||
if err != nil {
|
||||
if err == storage.ErrObjectNotExist {
|
||||
return nil, nil
|
||||
} else {
|
||||
return nil, fmt.Errorf("Failed to open state file at %v: %v", c.stateFileURL(), err)
|
||||
}
|
||||
}
|
||||
defer stateFileReader.Close()
|
||||
|
||||
stateFileContents, err := ioutil.ReadAll(stateFileReader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to read state file from %v: %v", c.stateFileURL(), err)
|
||||
}
|
||||
|
||||
stateFileAttrs, err := c.stateFile().Attrs(c.storageContext)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to read state file attrs from %v: %v", c.stateFileURL(), err)
|
||||
}
|
||||
|
||||
result := &remote.Payload{
|
||||
Data: stateFileContents,
|
||||
MD5: stateFileAttrs.MD5,
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (c *remoteClient) Put(data []byte) error {
|
||||
err := func() error {
|
||||
stateFileWriter := c.stateFile().NewWriter(c.storageContext)
|
||||
if _, err := stateFileWriter.Write(data); err != nil {
|
||||
return err
|
||||
}
|
||||
return stateFileWriter.Close()
|
||||
}()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to upload state to %v: %v", c.stateFileURL(), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *remoteClient) Delete() error {
|
||||
if err := c.stateFile().Delete(c.storageContext); err != nil {
|
||||
return fmt.Errorf("Failed to delete state file %v: %v", c.stateFileURL(), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Lock writes to a lock file, ensuring file creation. Returns the generation
|
||||
// number, which must be passed to Unlock().
|
||||
func (c *remoteClient) Lock(info *state.LockInfo) (string, error) {
|
||||
infoJson, err := json.Marshal(info)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
lockFile := c.lockFile()
|
||||
w := lockFile.If(storage.Conditions{DoesNotExist: true}).NewWriter(c.storageContext)
|
||||
err = func() error {
|
||||
if _, err := w.Write(infoJson); err != nil {
|
||||
return err
|
||||
}
|
||||
return w.Close()
|
||||
}()
|
||||
if err != nil {
|
||||
return "", c.lockError(fmt.Errorf("writing %q failed: %v", c.lockFileURL(), err))
|
||||
}
|
||||
|
||||
info.ID = strconv.FormatInt(w.Attrs().Generation, 10)
|
||||
info.Path = c.lockFileURL()
|
||||
|
||||
return info.ID, nil
|
||||
}
|
||||
|
||||
func (c *remoteClient) Unlock(id string) error {
|
||||
gen, err := strconv.ParseInt(id, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.lockFile().If(storage.Conditions{GenerationMatch: gen}).Delete(c.storageContext); err != nil {
|
||||
return c.lockError(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *remoteClient) lockError(err error) *state.LockError {
|
||||
lockErr := &state.LockError{
|
||||
Err: err,
|
||||
}
|
||||
|
||||
info, infoErr := c.lockInfo()
|
||||
if infoErr != nil {
|
||||
lockErr.Err = multierror.Append(lockErr.Err, infoErr)
|
||||
} else {
|
||||
lockErr.Info = info
|
||||
}
|
||||
return lockErr
|
||||
}
|
||||
|
||||
// lockInfo reads the lock file, parses its contents and returns the parsed
|
||||
// LockInfo struct.
|
||||
func (c *remoteClient) lockInfo() (*state.LockInfo, error) {
|
||||
r, err := c.lockFile().NewReader(c.storageContext)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
rawData, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := &state.LockInfo{}
|
||||
if err := json.Unmarshal(rawData, info); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
func (c *remoteClient) stateFile() *storage.ObjectHandle {
|
||||
return c.storageClient.Bucket(c.bucketName).Object(c.stateFilePath)
|
||||
}
|
||||
|
||||
func (c *remoteClient) stateFileURL() string {
|
||||
return fmt.Sprintf("gs://%v/%v", c.bucketName, c.stateFilePath)
|
||||
}
|
||||
|
||||
func (c *remoteClient) lockFile() *storage.ObjectHandle {
|
||||
return c.storageClient.Bucket(c.bucketName).Object(c.lockFilePath)
|
||||
}
|
||||
|
||||
func (c *remoteClient) lockFileURL() string {
|
||||
return fmt.Sprintf("gs://%v/%v", c.bucketName, c.lockFilePath)
|
||||
}
|
|
@ -1,69 +0,0 @@
|
|||
package remote
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
storage "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
func TestGCSClient_impl(t *testing.T) {
|
||||
var _ Client = new(GCSClient)
|
||||
}
|
||||
|
||||
func TestGCSClient(t *testing.T) {
|
||||
// This test creates a bucket in GCS and populates it.
|
||||
// It may incur costs, so it will only run if GCS credential environment
|
||||
// variables are present.
|
||||
|
||||
projectID := os.Getenv("GOOGLE_PROJECT")
|
||||
if projectID == "" {
|
||||
t.Skipf("skipping; GOOGLE_PROJECT must be set")
|
||||
}
|
||||
|
||||
bucketName := fmt.Sprintf("terraform-remote-gcs-test-%x", time.Now().Unix())
|
||||
keyName := "testState"
|
||||
testData := []byte(`testing data`)
|
||||
|
||||
config := make(map[string]string)
|
||||
config["bucket"] = bucketName
|
||||
config["path"] = keyName
|
||||
|
||||
client, err := gcsFactory(config)
|
||||
if err != nil {
|
||||
t.Fatalf("Error for valid config: %v", err)
|
||||
}
|
||||
|
||||
gcsClient := client.(*GCSClient)
|
||||
nativeClient := gcsClient.clientStorage
|
||||
|
||||
// Be clear about what we're doing in case the user needs to clean
|
||||
// this up later.
|
||||
if _, err := nativeClient.Buckets.Get(bucketName).Do(); err == nil {
|
||||
fmt.Printf("Bucket %s already exists - skipping buckets.insert call.", bucketName)
|
||||
} else {
|
||||
// Create a bucket.
|
||||
if res, err := nativeClient.Buckets.Insert(projectID, &storage.Bucket{Name: bucketName}).Do(); err == nil {
|
||||
fmt.Printf("Created bucket %v at location %v\n\n", res.Name, res.SelfLink)
|
||||
} else {
|
||||
t.Skipf("Failed to create test GCS bucket, so skipping")
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure we can perform a PUT request with the encryption header
|
||||
err = gcsClient.Put(testData)
|
||||
if err != nil {
|
||||
t.Logf("WARNING: Failed to send test data to GCS bucket. (error was %s)", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
// Delete the test bucket in the project
|
||||
if err := gcsClient.clientStorage.Buckets.Delete(bucketName).Do(); err != nil {
|
||||
t.Logf("WARNING: Failed to delete the test GCS bucket. It has been left in your GCE account and may incur storage charges. (error was %s)", err)
|
||||
}
|
||||
}()
|
||||
|
||||
testClient(t, client)
|
||||
}
|
|
@ -47,7 +47,6 @@ func NewClient(t string, conf map[string]string) (Client, error) {
|
|||
var BuiltinClients = map[string]Factory{
|
||||
"artifactory": artifactoryFactory,
|
||||
"etcd": etcdFactory,
|
||||
"gcs": gcsFactory,
|
||||
"http": httpFactory,
|
||||
"local": fileFactory,
|
||||
"manta": mantaFactory,
|
||||
|
|
|
@ -0,0 +1,256 @@
|
|||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package iam supports the resource-specific operations of Google Cloud
|
||||
// IAM (Identity and Access Management) for the Google Cloud Libraries.
|
||||
// See https://cloud.google.com/iam for more about IAM.
|
||||
//
|
||||
// Users of the Google Cloud Libraries will typically not use this package
|
||||
// directly. Instead they will begin with some resource that supports IAM, like
|
||||
// a pubsub topic, and call its IAM method to get a Handle for that resource.
|
||||
package iam
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
pb "google.golang.org/genproto/googleapis/iam/v1"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// client abstracts the IAMPolicy API to allow multiple implementations.
|
||||
type client interface {
|
||||
Get(ctx context.Context, resource string) (*pb.Policy, error)
|
||||
Set(ctx context.Context, resource string, p *pb.Policy) error
|
||||
Test(ctx context.Context, resource string, perms []string) ([]string, error)
|
||||
}
|
||||
|
||||
// grpcClient implements client for the standard gRPC-based IAMPolicy service.
|
||||
type grpcClient struct {
|
||||
c pb.IAMPolicyClient
|
||||
}
|
||||
|
||||
func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
|
||||
proto, err := g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto, nil
|
||||
}
|
||||
func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
|
||||
_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
|
||||
Resource: resource,
|
||||
Policy: p,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
|
||||
res, err := g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
|
||||
Resource: resource,
|
||||
Permissions: perms,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.Permissions, nil
|
||||
}
|
||||
|
||||
// A Handle provides IAM operations for a resource.
|
||||
type Handle struct {
|
||||
c client
|
||||
resource string
|
||||
}
|
||||
|
||||
// InternalNewHandle is for use by the Google Cloud Libraries only.
|
||||
//
|
||||
// InternalNewHandle returns a Handle for resource.
|
||||
// The conn parameter refers to a server that must support the IAMPolicy service.
|
||||
func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle {
|
||||
return InternalNewHandleClient(&grpcClient{c: pb.NewIAMPolicyClient(conn)}, resource)
|
||||
}
|
||||
|
||||
// InternalNewHandleClient is for use by the Google Cloud Libraries only.
|
||||
//
|
||||
// InternalNewHandleClient returns a Handle for resource using the given
|
||||
// client implementation.
|
||||
func InternalNewHandleClient(c client, resource string) *Handle {
|
||||
return &Handle{
|
||||
c: c,
|
||||
resource: resource,
|
||||
}
|
||||
}
|
||||
|
||||
// Policy retrieves the IAM policy for the resource.
|
||||
func (h *Handle) Policy(ctx context.Context) (*Policy, error) {
|
||||
proto, err := h.c.Get(ctx, h.resource)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Policy{InternalProto: proto}, nil
|
||||
}
|
||||
|
||||
// SetPolicy replaces the resource's current policy with the supplied Policy.
|
||||
//
|
||||
// If policy was created from a prior call to Get, then the modification will
|
||||
// only succeed if the policy has not changed since the Get.
|
||||
func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error {
|
||||
return h.c.Set(ctx, h.resource, policy.InternalProto)
|
||||
}
|
||||
|
||||
// TestPermissions returns the subset of permissions that the caller has on the resource.
|
||||
func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
|
||||
return h.c.Test(ctx, h.resource, permissions)
|
||||
}
|
||||
|
||||
// A RoleName is a name representing a collection of permissions.
|
||||
type RoleName string
|
||||
|
||||
// Common role names.
|
||||
const (
|
||||
Owner RoleName = "roles/owner"
|
||||
Editor RoleName = "roles/editor"
|
||||
Viewer RoleName = "roles/viewer"
|
||||
)
|
||||
|
||||
const (
|
||||
// AllUsers is a special member that denotes all users, even unauthenticated ones.
|
||||
AllUsers = "allUsers"
|
||||
|
||||
// AllAuthenticatedUsers is a special member that denotes all authenticated users.
|
||||
AllAuthenticatedUsers = "allAuthenticatedUsers"
|
||||
)
|
||||
|
||||
// A Policy is a list of Bindings representing roles
|
||||
// granted to members.
|
||||
//
|
||||
// The zero Policy is a valid policy with no bindings.
|
||||
type Policy struct {
|
||||
// TODO(jba): when type aliases are available, put Policy into an internal package
|
||||
// and provide an exported alias here.
|
||||
|
||||
// This field is exported for use by the Google Cloud Libraries only.
|
||||
// It may become unexported in a future release.
|
||||
InternalProto *pb.Policy
|
||||
}
|
||||
|
||||
// Members returns the list of members with the supplied role.
|
||||
// The return value should not be modified. Use Add and Remove
|
||||
// to modify the members of a role.
|
||||
func (p *Policy) Members(r RoleName) []string {
|
||||
b := p.binding(r)
|
||||
if b == nil {
|
||||
return nil
|
||||
}
|
||||
return b.Members
|
||||
}
|
||||
|
||||
// HasRole reports whether member has role r.
|
||||
func (p *Policy) HasRole(member string, r RoleName) bool {
|
||||
return memberIndex(member, p.binding(r)) >= 0
|
||||
}
|
||||
|
||||
// Add adds member member to role r if it is not already present.
|
||||
// A new binding is created if there is no binding for the role.
|
||||
func (p *Policy) Add(member string, r RoleName) {
|
||||
b := p.binding(r)
|
||||
if b == nil {
|
||||
if p.InternalProto == nil {
|
||||
p.InternalProto = &pb.Policy{}
|
||||
}
|
||||
p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{
|
||||
Role: string(r),
|
||||
Members: []string{member},
|
||||
})
|
||||
return
|
||||
}
|
||||
if memberIndex(member, b) < 0 {
|
||||
b.Members = append(b.Members, member)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Remove removes member from role r if it is present.
|
||||
func (p *Policy) Remove(member string, r RoleName) {
|
||||
bi := p.bindingIndex(r)
|
||||
if bi < 0 {
|
||||
return
|
||||
}
|
||||
bindings := p.InternalProto.Bindings
|
||||
b := bindings[bi]
|
||||
mi := memberIndex(member, b)
|
||||
if mi < 0 {
|
||||
return
|
||||
}
|
||||
// Order doesn't matter for bindings or members, so to remove, move the last item
|
||||
// into the removed spot and shrink the slice.
|
||||
if len(b.Members) == 1 {
|
||||
// Remove binding.
|
||||
last := len(bindings) - 1
|
||||
bindings[bi] = bindings[last]
|
||||
bindings[last] = nil
|
||||
p.InternalProto.Bindings = bindings[:last]
|
||||
return
|
||||
}
|
||||
// Remove member.
|
||||
// TODO(jba): worry about multiple copies of m?
|
||||
last := len(b.Members) - 1
|
||||
b.Members[mi] = b.Members[last]
|
||||
b.Members[last] = ""
|
||||
b.Members = b.Members[:last]
|
||||
}
|
||||
|
||||
// Roles returns the names of all the roles that appear in the Policy.
|
||||
func (p *Policy) Roles() []RoleName {
|
||||
if p.InternalProto == nil {
|
||||
return nil
|
||||
}
|
||||
var rns []RoleName
|
||||
for _, b := range p.InternalProto.Bindings {
|
||||
rns = append(rns, RoleName(b.Role))
|
||||
}
|
||||
return rns
|
||||
}
|
||||
|
||||
// binding returns the Binding for the suppied role, or nil if there isn't one.
|
||||
func (p *Policy) binding(r RoleName) *pb.Binding {
|
||||
i := p.bindingIndex(r)
|
||||
if i < 0 {
|
||||
return nil
|
||||
}
|
||||
return p.InternalProto.Bindings[i]
|
||||
}
|
||||
|
||||
func (p *Policy) bindingIndex(r RoleName) int {
|
||||
if p.InternalProto == nil {
|
||||
return -1
|
||||
}
|
||||
for i, b := range p.InternalProto.Bindings {
|
||||
if b.Role == string(r) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// memberIndex returns the index of m in b's Members, or -1 if not found.
|
||||
func memberIndex(m string, b *pb.Binding) int {
|
||||
if b == nil {
|
||||
return -1
|
||||
}
|
||||
for i, mm := range b.Members {
|
||||
if mm == m {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
|
@ -0,0 +1,108 @@
|
|||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package optional provides versions of primitive types that can
|
||||
// be nil. These are useful in methods that update some of an API object's
|
||||
// fields.
|
||||
package optional
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type (
|
||||
// Bool is either a bool or nil.
|
||||
Bool interface{}
|
||||
|
||||
// String is either a string or nil.
|
||||
String interface{}
|
||||
|
||||
// Int is either an int or nil.
|
||||
Int interface{}
|
||||
|
||||
// Uint is either a uint or nil.
|
||||
Uint interface{}
|
||||
|
||||
// Float64 is either a float64 or nil.
|
||||
Float64 interface{}
|
||||
|
||||
// Duration is either a time.Duration or nil.
|
||||
Duration interface{}
|
||||
)
|
||||
|
||||
// ToBool returns its argument as a bool.
|
||||
// It panics if its argument is nil or not a bool.
|
||||
func ToBool(v Bool) bool {
|
||||
x, ok := v.(bool)
|
||||
if !ok {
|
||||
doPanic("Bool", v)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// ToString returns its argument as a string.
|
||||
// It panics if its argument is nil or not a string.
|
||||
func ToString(v String) string {
|
||||
x, ok := v.(string)
|
||||
if !ok {
|
||||
doPanic("String", v)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// ToInt returns its argument as an int.
|
||||
// It panics if its argument is nil or not an int.
|
||||
func ToInt(v Int) int {
|
||||
x, ok := v.(int)
|
||||
if !ok {
|
||||
doPanic("Int", v)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// ToUint returns its argument as a uint.
|
||||
// It panics if its argument is nil or not a uint.
|
||||
func ToUint(v Uint) uint {
|
||||
x, ok := v.(uint)
|
||||
if !ok {
|
||||
doPanic("Uint", v)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// ToFloat64 returns its argument as a float64.
|
||||
// It panics if its argument is nil or not a float64.
|
||||
func ToFloat64(v Float64) float64 {
|
||||
x, ok := v.(float64)
|
||||
if !ok {
|
||||
doPanic("Float64", v)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// ToDuration returns its argument as a time.Duration.
|
||||
// It panics if its argument is nil or not a time.Duration.
|
||||
func ToDuration(v Duration) time.Duration {
|
||||
x, ok := v.(time.Duration)
|
||||
if !ok {
|
||||
doPanic("Duration", v)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
func doPanic(capType string, v interface{}) {
|
||||
panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
|
||||
}
|
|
@ -0,0 +1,6 @@
|
|||
#!/bin/bash
|
||||
|
||||
today=$(date +%Y%m%d)
|
||||
|
||||
sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE
|
||||
|
|
@ -0,0 +1,71 @@
|
|||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:generate ./update_version.sh
|
||||
|
||||
// Package version contains version information for Google Cloud Client
|
||||
// Libraries for Go, as reported in request headers.
|
||||
package version
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// Repo is the current version of the client libraries in this
|
||||
// repo. It should be a date in YYYYMMDD format.
|
||||
const Repo = "20170928"
|
||||
|
||||
// Go returns the Go runtime version. The returned string
|
||||
// has no whitespace.
|
||||
func Go() string {
|
||||
return goVersion
|
||||
}
|
||||
|
||||
var goVersion = goVer(runtime.Version())
|
||||
|
||||
const develPrefix = "devel +"
|
||||
|
||||
func goVer(s string) string {
|
||||
if strings.HasPrefix(s, develPrefix) {
|
||||
s = s[len(develPrefix):]
|
||||
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
|
||||
s = s[:p]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
if strings.HasPrefix(s, "go1") {
|
||||
s = s[2:]
|
||||
var prerelease string
|
||||
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
|
||||
s, prerelease = s[:p], s[p:]
|
||||
}
|
||||
if strings.HasSuffix(s, ".") {
|
||||
s += "0"
|
||||
} else if strings.Count(s, ".") < 2 {
|
||||
s += ".0"
|
||||
}
|
||||
if prerelease != "" {
|
||||
s += "-" + prerelease
|
||||
}
|
||||
return s
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func notSemverRune(r rune) bool {
|
||||
return strings.IndexRune("0123456789.", r) < 0
|
||||
}
|
|
@ -0,0 +1,252 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/googleapi"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// ACLRole is the level of access to grant.
|
||||
type ACLRole string
|
||||
|
||||
const (
|
||||
RoleOwner ACLRole = "OWNER"
|
||||
RoleReader ACLRole = "READER"
|
||||
RoleWriter ACLRole = "WRITER"
|
||||
)
|
||||
|
||||
// ACLEntity refers to a user or group.
|
||||
// They are sometimes referred to as grantees.
|
||||
//
|
||||
// It could be in the form of:
|
||||
// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
|
||||
// "domain-<domain>" and "project-team-<projectId>".
|
||||
//
|
||||
// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
|
||||
type ACLEntity string
|
||||
|
||||
const (
|
||||
AllUsers ACLEntity = "allUsers"
|
||||
AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
|
||||
)
|
||||
|
||||
// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket.
|
||||
type ACLRule struct {
|
||||
Entity ACLEntity
|
||||
Role ACLRole
|
||||
}
|
||||
|
||||
// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
|
||||
type ACLHandle struct {
|
||||
c *Client
|
||||
bucket string
|
||||
object string
|
||||
isDefault bool
|
||||
userProject string // for requester-pays buckets
|
||||
}
|
||||
|
||||
// Delete permanently deletes the ACL entry for the given entity.
|
||||
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error {
|
||||
if a.object != "" {
|
||||
return a.objectDelete(ctx, entity)
|
||||
}
|
||||
if a.isDefault {
|
||||
return a.bucketDefaultDelete(ctx, entity)
|
||||
}
|
||||
return a.bucketDelete(ctx, entity)
|
||||
}
|
||||
|
||||
// Set sets the permission level for the given entity.
|
||||
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error {
|
||||
if a.object != "" {
|
||||
return a.objectSet(ctx, entity, role, false)
|
||||
}
|
||||
if a.isDefault {
|
||||
return a.objectSet(ctx, entity, role, true)
|
||||
}
|
||||
return a.bucketSet(ctx, entity, role)
|
||||
}
|
||||
|
||||
// List retrieves ACL entries.
|
||||
func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) {
|
||||
if a.object != "" {
|
||||
return a.objectList(ctx)
|
||||
}
|
||||
if a.isDefault {
|
||||
return a.bucketDefaultList(ctx)
|
||||
}
|
||||
return a.bucketList(ctx)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
|
||||
var acls *raw.ObjectAccessControls
|
||||
var err error
|
||||
err = runWithRetry(ctx, func() error {
|
||||
req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
|
||||
a.configureCall(req, ctx)
|
||||
acls, err = req.Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", a.bucket, err)
|
||||
}
|
||||
return toACLRules(acls.Items), nil
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
|
||||
err := runWithRetry(ctx, func() error {
|
||||
req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
|
||||
a.configureCall(req, ctx)
|
||||
return req.Do()
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("storage: error deleting default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
|
||||
var acls *raw.BucketAccessControls
|
||||
var err error
|
||||
err = runWithRetry(ctx, func() error {
|
||||
req := a.c.raw.BucketAccessControls.List(a.bucket)
|
||||
a.configureCall(req, ctx)
|
||||
acls, err = req.Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", a.bucket, err)
|
||||
}
|
||||
r := make([]ACLRule, len(acls.Items))
|
||||
for i, v := range acls.Items {
|
||||
r[i].Entity = ACLEntity(v.Entity)
|
||||
r[i].Role = ACLRole(v.Role)
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
|
||||
acl := &raw.BucketAccessControl{
|
||||
Bucket: a.bucket,
|
||||
Entity: string(entity),
|
||||
Role: string(role),
|
||||
}
|
||||
err := runWithRetry(ctx, func() error {
|
||||
req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl)
|
||||
a.configureCall(req, ctx)
|
||||
_, err := req.Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("storage: error updating bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
|
||||
err := runWithRetry(ctx, func() error {
|
||||
req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
|
||||
a.configureCall(req, ctx)
|
||||
return req.Do()
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("storage: error deleting bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
|
||||
var acls *raw.ObjectAccessControls
|
||||
var err error
|
||||
err = runWithRetry(ctx, func() error {
|
||||
req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
|
||||
a.configureCall(req, ctx)
|
||||
acls, err = req.Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", a.bucket, a.object, err)
|
||||
}
|
||||
return toACLRules(acls.Items), nil
|
||||
}
|
||||
|
||||
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
|
||||
type setRequest interface {
|
||||
Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error)
|
||||
Header() http.Header
|
||||
}
|
||||
|
||||
acl := &raw.ObjectAccessControl{
|
||||
Bucket: a.bucket,
|
||||
Entity: string(entity),
|
||||
Role: string(role),
|
||||
}
|
||||
var req setRequest
|
||||
if isBucketDefault {
|
||||
req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl)
|
||||
} else {
|
||||
req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
|
||||
}
|
||||
a.configureCall(req, ctx)
|
||||
err := runWithRetry(ctx, func() error {
|
||||
_, err := req.Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
if isBucketDefault {
|
||||
return fmt.Errorf("storage: error updating default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
|
||||
} else {
|
||||
return fmt.Errorf("storage: error updating object ACL entry for bucket %q, object %q, entity %q: %v", a.bucket, a.object, entity, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
|
||||
err := runWithRetry(ctx, func() error {
|
||||
req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
|
||||
a.configureCall(req, ctx)
|
||||
return req.Do()
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("storage: error deleting object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *ACLHandle) configureCall(call interface {
|
||||
Header() http.Header
|
||||
}, ctx context.Context) {
|
||||
vc := reflect.ValueOf(call)
|
||||
vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
|
||||
if a.userProject != "" {
|
||||
vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)})
|
||||
}
|
||||
setClientHeader(call.Header())
|
||||
}
|
||||
|
||||
func toACLRules(items []*raw.ObjectAccessControl) []ACLRule {
|
||||
r := make([]ACLRule, 0, len(items))
|
||||
for _, item := range items {
|
||||
r = append(r, ACLRule{Entity: ACLEntity(item.Entity), Role: ACLRole(item.Role)})
|
||||
}
|
||||
return r
|
||||
}
|
|
@ -0,0 +1,761 @@
|
|||
// Copyright 2014 Google Inc. LiveAndArchived Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/optional"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/api/iterator"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// BucketHandle provides operations on a Google Cloud Storage bucket.
|
||||
// Use Client.Bucket to get a handle.
|
||||
type BucketHandle struct {
|
||||
c *Client
|
||||
name string
|
||||
acl ACLHandle
|
||||
defaultObjectACL ACLHandle
|
||||
conds *BucketConditions
|
||||
userProject string // project for requester-pays buckets
|
||||
}
|
||||
|
||||
// Bucket returns a BucketHandle, which provides operations on the named bucket.
|
||||
// This call does not perform any network operations.
|
||||
//
|
||||
// The supplied name must contain only lowercase letters, numbers, dashes,
|
||||
// underscores, and dots. The full specification for valid bucket names can be
|
||||
// found at:
|
||||
// https://cloud.google.com/storage/docs/bucket-naming
|
||||
func (c *Client) Bucket(name string) *BucketHandle {
|
||||
return &BucketHandle{
|
||||
c: c,
|
||||
name: name,
|
||||
acl: ACLHandle{
|
||||
c: c,
|
||||
bucket: name,
|
||||
},
|
||||
defaultObjectACL: ACLHandle{
|
||||
c: c,
|
||||
bucket: name,
|
||||
isDefault: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Create creates the Bucket in the project.
|
||||
// If attrs is nil the API defaults will be used.
|
||||
func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) error {
|
||||
var bkt *raw.Bucket
|
||||
if attrs != nil {
|
||||
bkt = attrs.toRawBucket()
|
||||
} else {
|
||||
bkt = &raw.Bucket{}
|
||||
}
|
||||
bkt.Name = b.name
|
||||
// If there is lifecycle information but no location, explicitly set
|
||||
// the location. This is a GCS quirk/bug.
|
||||
if bkt.Location == "" && bkt.Lifecycle != nil {
|
||||
bkt.Location = "US"
|
||||
}
|
||||
req := b.c.raw.Buckets.Insert(projectID, bkt)
|
||||
setClientHeader(req.Header())
|
||||
return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err })
|
||||
}
|
||||
|
||||
// Delete deletes the Bucket.
|
||||
func (b *BucketHandle) Delete(ctx context.Context) error {
|
||||
req, err := b.newDeleteCall()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return runWithRetry(ctx, func() error { return req.Context(ctx).Do() })
|
||||
}
|
||||
|
||||
func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) {
|
||||
req := b.c.raw.Buckets.Delete(b.name)
|
||||
setClientHeader(req.Header())
|
||||
if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b.userProject != "" {
|
||||
req.UserProject(b.userProject)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// ACL returns an ACLHandle, which provides access to the bucket's access control list.
|
||||
// This controls who can list, create or overwrite the objects in a bucket.
|
||||
// This call does not perform any network operations.
|
||||
func (b *BucketHandle) ACL() *ACLHandle {
|
||||
return &b.acl
|
||||
}
|
||||
|
||||
// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs.
|
||||
// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL.
|
||||
// This call does not perform any network operations.
|
||||
func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
|
||||
return &b.defaultObjectACL
|
||||
}
|
||||
|
||||
// Object returns an ObjectHandle, which provides operations on the named object.
|
||||
// This call does not perform any network operations.
|
||||
//
|
||||
// name must consist entirely of valid UTF-8-encoded runes. The full specification
|
||||
// for valid object names can be found at:
|
||||
// https://cloud.google.com/storage/docs/bucket-naming
|
||||
func (b *BucketHandle) Object(name string) *ObjectHandle {
|
||||
return &ObjectHandle{
|
||||
c: b.c,
|
||||
bucket: b.name,
|
||||
object: name,
|
||||
acl: ACLHandle{
|
||||
c: b.c,
|
||||
bucket: b.name,
|
||||
object: name,
|
||||
userProject: b.userProject,
|
||||
},
|
||||
gen: -1,
|
||||
userProject: b.userProject,
|
||||
}
|
||||
}
|
||||
|
||||
// Attrs returns the metadata for the bucket.
|
||||
func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) {
|
||||
req, err := b.newGetCall()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var resp *raw.Bucket
|
||||
err = runWithRetry(ctx, func() error {
|
||||
resp, err = req.Context(ctx).Do()
|
||||
return err
|
||||
})
|
||||
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
|
||||
return nil, ErrBucketNotExist
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newBucket(resp), nil
|
||||
}
|
||||
|
||||
func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) {
|
||||
req := b.c.raw.Buckets.Get(b.name).Projection("full")
|
||||
setClientHeader(req.Header())
|
||||
if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b.userProject != "" {
|
||||
req.UserProject(b.userProject)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (*BucketAttrs, error) {
|
||||
req, err := b.newPatchCall(&uattrs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO(jba): retry iff metagen is set?
|
||||
rb, err := req.Context(ctx).Do()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newBucket(rb), nil
|
||||
}
|
||||
|
||||
func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) {
|
||||
rb := uattrs.toRawBucket()
|
||||
req := b.c.raw.Buckets.Patch(b.name, rb).Projection("full")
|
||||
setClientHeader(req.Header())
|
||||
if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b.userProject != "" {
|
||||
req.UserProject(b.userProject)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// BucketAttrs represents the metadata for a Google Cloud Storage bucket.
|
||||
// Read-only fields are ignored by BucketHandle.Create.
|
||||
type BucketAttrs struct {
|
||||
// Name is the name of the bucket.
|
||||
// This field is read-only.
|
||||
Name string
|
||||
|
||||
// ACL is the list of access control rules on the bucket.
|
||||
ACL []ACLRule
|
||||
|
||||
// DefaultObjectACL is the list of access controls to
|
||||
// apply to new objects when no object ACL is provided.
|
||||
DefaultObjectACL []ACLRule
|
||||
|
||||
// Location is the location of the bucket. It defaults to "US".
|
||||
Location string
|
||||
|
||||
// MetaGeneration is the metadata generation of the bucket.
|
||||
// This field is read-only.
|
||||
MetaGeneration int64
|
||||
|
||||
// StorageClass is the default storage class of the bucket. This defines
|
||||
// how objects in the bucket are stored and determines the SLA
|
||||
// and the cost of storage. Typical values are "MULTI_REGIONAL",
|
||||
// "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and
|
||||
// "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD", which
|
||||
// is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on
|
||||
// the bucket's location settings.
|
||||
StorageClass string
|
||||
|
||||
// Created is the creation time of the bucket.
|
||||
// This field is read-only.
|
||||
Created time.Time
|
||||
|
||||
// VersioningEnabled reports whether this bucket has versioning enabled.
|
||||
VersioningEnabled bool
|
||||
|
||||
// Labels are the bucket's labels.
|
||||
Labels map[string]string
|
||||
|
||||
// RequesterPays reports whether the bucket is a Requester Pays bucket.
|
||||
RequesterPays bool
|
||||
// Lifecycle is the lifecycle configuration for objects in the bucket.
|
||||
Lifecycle Lifecycle
|
||||
}
|
||||
|
||||
// Lifecycle is the lifecycle configuration for objects in the bucket.
|
||||
type Lifecycle struct {
|
||||
Rules []LifecycleRule
|
||||
}
|
||||
|
||||
const (
	// RFC3339 date with only the date segment, used for CreatedBefore in LifecycleRule.
	rfc3339Date = "2006-01-02"

	// DeleteAction is a lifecycle action that deletes live and/or archived
	// objects. Takes precedence over SetStorageClass actions.
	DeleteAction = "Delete"

	// SetStorageClassAction changes the storage class of live and/or archived
	// objects.
	SetStorageClassAction = "SetStorageClass"
)
|
||||
|
||||
// LifecycleRule is a lifecycle configuration rule.
|
||||
//
|
||||
// When all the configured conditions are met by an object in the bucket, the
|
||||
// configured action will automatically be taken on that object.
|
||||
type LifecycleRule struct {
|
||||
// Action is the action to take when all of the associated conditions are
|
||||
// met.
|
||||
Action LifecycleAction
|
||||
|
||||
// Condition is the set of conditions that must be met for the associated
|
||||
// action to be taken.
|
||||
Condition LifecycleCondition
|
||||
}
|
||||
|
||||
// LifecycleAction is a lifecycle configuration action.
|
||||
type LifecycleAction struct {
|
||||
// Type is the type of action to take on matching objects.
|
||||
//
|
||||
// Acceptable values are "Delete" to delete matching objects and
|
||||
// "SetStorageClass" to set the storage class defined in StorageClass on
|
||||
// matching objects.
|
||||
Type string
|
||||
|
||||
// StorageClass is the storage class to set on matching objects if the Action
|
||||
// is "SetStorageClass".
|
||||
StorageClass string
|
||||
}
|
||||
|
||||
// Liveness specifies whether the object is live or not.
|
||||
type Liveness int
|
||||
|
||||
const (
|
||||
// LiveAndArchived includes both live and archived objects.
|
||||
LiveAndArchived Liveness = iota
|
||||
// Live specifies that the object is still live.
|
||||
Live
|
||||
// Archived specifies that the object is archived.
|
||||
Archived
|
||||
)
|
||||
|
||||
// LifecycleCondition is a set of conditions used to match objects and take an
// action automatically.
//
// All configured conditions must be met for the associated action to be taken.
type LifecycleCondition struct {
	// AgeInDays is the age of the object in days.
	AgeInDays int64

	// CreatedBefore is the time the object was created.
	//
	// This condition is satisfied when an object is created before midnight of
	// the specified date in UTC.
	CreatedBefore time.Time

	// Liveness specifies the object's liveness. Relevant only for versioned objects.
	Liveness Liveness

	// MatchesStorageClasses is the condition matching the object's storage
	// class.
	//
	// Values include "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE",
	// "STANDARD", and "DURABLE_REDUCED_AVAILABILITY".
	MatchesStorageClasses []string

	// NumNewerVersions is the condition matching objects with a number of newer versions.
	//
	// If the value is N, this condition is satisfied when there are at least N
	// versions (including the live version) newer than this version of the
	// object.
	NumNewerVersions int64
}
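
// exampleLifecycleAttrs is an illustrative sketch, not part of the vendored
// file: it shows how the lifecycle types above compose into BucketAttrs that
// could be passed to BucketHandle.Create. The 30-day age and two-version
// thresholds, and the COLDLINE class, are arbitrary example values.
func exampleLifecycleAttrs() *BucketAttrs {
	return &BucketAttrs{
		Lifecycle: Lifecycle{
			Rules: []LifecycleRule{
				{
					// Delete objects more than 30 days old.
					Action:    LifecycleAction{Type: DeleteAction},
					Condition: LifecycleCondition{AgeInDays: 30},
				},
				{
					// Move archived versions to COLDLINE once two newer
					// versions of the object exist.
					Action: LifecycleAction{
						Type:         SetStorageClassAction,
						StorageClass: "COLDLINE",
					},
					Condition: LifecycleCondition{
						Liveness:         Archived,
						NumNewerVersions: 2,
					},
				},
			},
		},
	}
}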
|
||||
|
||||
func newBucket(b *raw.Bucket) *BucketAttrs {
|
||||
if b == nil {
|
||||
return nil
|
||||
}
|
||||
bucket := &BucketAttrs{
|
||||
Name: b.Name,
|
||||
Location: b.Location,
|
||||
MetaGeneration: b.Metageneration,
|
||||
StorageClass: b.StorageClass,
|
||||
Created: convertTime(b.TimeCreated),
|
||||
VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled,
|
||||
Labels: b.Labels,
|
||||
RequesterPays: b.Billing != nil && b.Billing.RequesterPays,
|
||||
Lifecycle: toLifecycle(b.Lifecycle),
|
||||
}
|
||||
acl := make([]ACLRule, len(b.Acl))
|
||||
for i, rule := range b.Acl {
|
||||
acl[i] = ACLRule{
|
||||
Entity: ACLEntity(rule.Entity),
|
||||
Role: ACLRole(rule.Role),
|
||||
}
|
||||
}
|
||||
bucket.ACL = acl
|
||||
objACL := make([]ACLRule, len(b.DefaultObjectAcl))
|
||||
for i, rule := range b.DefaultObjectAcl {
|
||||
objACL[i] = ACLRule{
|
||||
Entity: ACLEntity(rule.Entity),
|
||||
Role: ACLRole(rule.Role),
|
||||
}
|
||||
}
|
||||
bucket.DefaultObjectACL = objACL
|
||||
return bucket
|
||||
}
|
||||
|
||||
// toRawBucket copies the editable attribute from b to the raw library's Bucket type.
|
||||
func (b *BucketAttrs) toRawBucket() *raw.Bucket {
|
||||
var acl []*raw.BucketAccessControl
|
||||
if len(b.ACL) > 0 {
|
||||
acl = make([]*raw.BucketAccessControl, len(b.ACL))
|
||||
for i, rule := range b.ACL {
|
||||
acl[i] = &raw.BucketAccessControl{
|
||||
Entity: string(rule.Entity),
|
||||
Role: string(rule.Role),
|
||||
}
|
||||
}
|
||||
}
|
||||
dACL := toRawObjectACL(b.DefaultObjectACL)
|
||||
// Copy label map.
|
||||
var labels map[string]string
|
||||
if len(b.Labels) > 0 {
|
||||
labels = make(map[string]string, len(b.Labels))
|
||||
for k, v := range b.Labels {
|
||||
labels[k] = v
|
||||
}
|
||||
}
|
||||
// Ignore VersioningEnabled if it is false. This is OK because
|
||||
// we only call this method when creating a bucket, and by default
|
||||
// new buckets have versioning off.
|
||||
var v *raw.BucketVersioning
|
||||
if b.VersioningEnabled {
|
||||
v = &raw.BucketVersioning{Enabled: true}
|
||||
}
|
||||
var bb *raw.BucketBilling
|
||||
if b.RequesterPays {
|
||||
bb = &raw.BucketBilling{RequesterPays: true}
|
||||
}
|
||||
return &raw.Bucket{
|
||||
Name: b.Name,
|
||||
DefaultObjectAcl: dACL,
|
||||
Location: b.Location,
|
||||
StorageClass: b.StorageClass,
|
||||
Acl: acl,
|
||||
Versioning: v,
|
||||
Labels: labels,
|
||||
Billing: bb,
|
||||
Lifecycle: toRawLifecycle(b.Lifecycle),
|
||||
}
|
||||
}
|
||||
|
||||
// BucketAttrsToUpdate defines the bucket metadata fields that can be changed
// with BucketHandle.Update. Unset fields are left unchanged.
type BucketAttrsToUpdate struct {
	// VersioningEnabled, if set, updates whether the bucket uses versioning.
	VersioningEnabled optional.Bool

	// RequesterPays, if set, updates whether the bucket is a Requester Pays bucket.
	RequesterPays optional.Bool

	setLabels    map[string]string
	deleteLabels map[string]bool
}

// SetLabel causes a label to be added or modified when ua is used
// in a call to Bucket.Update.
func (ua *BucketAttrsToUpdate) SetLabel(name, value string) {
	if ua.setLabels == nil {
		ua.setLabels = map[string]string{}
	}
	ua.setLabels[name] = value
}

// DeleteLabel causes a label to be deleted when ua is used in a
// call to Bucket.Update.
func (ua *BucketAttrsToUpdate) DeleteLabel(name string) {
	if ua.deleteLabels == nil {
		ua.deleteLabels = map[string]bool{}
	}
	ua.deleteLabels[name] = true
}
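
// exampleUpdateLabels is an illustrative sketch, not part of the vendored
// file: it shows the intended use of BucketAttrsToUpdate with
// BucketHandle.Update, enabling versioning, setting one label and deleting
// another in a single metadata patch. The label names are example values.
func exampleUpdateLabels(ctx context.Context, b *BucketHandle) (*BucketAttrs, error) {
	var ua BucketAttrsToUpdate
	ua.VersioningEnabled = true // optional.Bool accepts a plain bool
	ua.SetLabel("team", "storage")
	ua.DeleteLabel("deprecated")
	return b.Update(ctx, ua)
}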
|
||||
|
||||
func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
|
||||
rb := &raw.Bucket{}
|
||||
if ua.VersioningEnabled != nil {
|
||||
rb.Versioning = &raw.BucketVersioning{
|
||||
Enabled: optional.ToBool(ua.VersioningEnabled),
|
||||
ForceSendFields: []string{"Enabled"},
|
||||
}
|
||||
}
|
||||
if ua.RequesterPays != nil {
|
||||
rb.Billing = &raw.BucketBilling{
|
||||
RequesterPays: optional.ToBool(ua.RequesterPays),
|
||||
ForceSendFields: []string{"RequesterPays"},
|
||||
}
|
||||
}
|
||||
if ua.setLabels != nil || ua.deleteLabels != nil {
|
||||
rb.Labels = map[string]string{}
|
||||
for k, v := range ua.setLabels {
|
||||
rb.Labels[k] = v
|
||||
}
|
||||
if len(rb.Labels) == 0 && len(ua.deleteLabels) > 0 {
|
||||
rb.ForceSendFields = append(rb.ForceSendFields, "Labels")
|
||||
}
|
||||
for l := range ua.deleteLabels {
|
||||
rb.NullFields = append(rb.NullFields, "Labels."+l)
|
||||
}
|
||||
}
|
||||
return rb
|
||||
}
|
||||
|
||||
// If returns a new BucketHandle that applies a set of preconditions.
// Preconditions already set on the BucketHandle are ignored.
// Operations on the new handle will only occur if the preconditions are
// satisfied. The only valid preconditions for buckets are MetagenerationMatch
// and MetagenerationNotMatch.
func (b *BucketHandle) If(conds BucketConditions) *BucketHandle {
	b2 := *b
	b2.conds = &conds
	return &b2
}

// BucketConditions constrain bucket methods to act on specific metagenerations.
//
// The zero value is an empty set of constraints.
type BucketConditions struct {
	// MetagenerationMatch specifies that the bucket must have the given
	// metageneration for the operation to occur.
	// If MetagenerationMatch is zero, it has no effect.
	MetagenerationMatch int64

	// MetagenerationNotMatch specifies that the bucket must not have the given
	// metageneration for the operation to occur.
	// If MetagenerationNotMatch is zero, it has no effect.
	MetagenerationNotMatch int64
}

func (c *BucketConditions) validate(method string) error {
	if *c == (BucketConditions{}) {
		return fmt.Errorf("storage: %s: empty conditions", method)
	}
	if c.MetagenerationMatch != 0 && c.MetagenerationNotMatch != 0 {
		return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method)
	}
	return nil
}
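
// exampleConditionalUpdate is an illustrative sketch, not part of the vendored
// file: it applies a metageneration precondition so the update only succeeds
// if the bucket metadata has not changed since attrs was read.
func exampleConditionalUpdate(ctx context.Context, b *BucketHandle, attrs *BucketAttrs, ua BucketAttrsToUpdate) (*BucketAttrs, error) {
	return b.If(BucketConditions{MetagenerationMatch: attrs.MetaGeneration}).Update(ctx, ua)
}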
|
||||
|
||||
// UserProject returns a new BucketHandle that passes the project ID as the user
// project for all subsequent calls. A user project is required for all operations
// on requester-pays buckets.
func (b *BucketHandle) UserProject(projectID string) *BucketHandle {
	b2 := *b
	b2.userProject = projectID
	b2.acl.userProject = projectID
	b2.defaultObjectACL.userProject = projectID
	return &b2
}
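
// exampleRequesterPays is an illustrative sketch, not part of the vendored
// file: operations on a Requester Pays bucket need a project to bill, which
// UserProject threads through to the bucket, its ACL handles, and any object
// handles derived from the returned BucketHandle. The bucket and object
// names are example values.
func exampleRequesterPays(c *Client, billedProject string) *ObjectHandle {
	return c.Bucket("some-requester-pays-bucket").
		UserProject(billedProject).
		Object("data")
}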
|
||||
|
||||
// applyBucketConds modifies the provided call using the conditions in conds.
|
||||
// call is something that quacks like a *raw.WhateverCall.
|
||||
func applyBucketConds(method string, conds *BucketConditions, call interface{}) error {
|
||||
if conds == nil {
|
||||
return nil
|
||||
}
|
||||
if err := conds.validate(method); err != nil {
|
||||
return err
|
||||
}
|
||||
cval := reflect.ValueOf(call)
|
||||
switch {
|
||||
case conds.MetagenerationMatch != 0:
|
||||
if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) {
|
||||
return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
|
||||
}
|
||||
case conds.MetagenerationNotMatch != 0:
|
||||
if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) {
|
||||
return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
|
||||
var rl raw.BucketLifecycle
|
||||
if len(l.Rules) == 0 {
|
||||
return nil
|
||||
}
|
||||
for _, r := range l.Rules {
|
||||
rr := &raw.BucketLifecycleRule{
|
||||
Action: &raw.BucketLifecycleRuleAction{
|
||||
Type: r.Action.Type,
|
||||
StorageClass: r.Action.StorageClass,
|
||||
},
|
||||
Condition: &raw.BucketLifecycleRuleCondition{
|
||||
Age: r.Condition.AgeInDays,
|
||||
MatchesStorageClass: r.Condition.MatchesStorageClasses,
|
||||
NumNewerVersions: r.Condition.NumNewerVersions,
|
||||
},
|
||||
}
|
||||
|
||||
switch r.Condition.Liveness {
|
||||
case LiveAndArchived:
|
||||
rr.Condition.IsLive = nil
|
||||
case Live:
|
||||
rr.Condition.IsLive = googleapi.Bool(true)
|
||||
case Archived:
|
||||
rr.Condition.IsLive = googleapi.Bool(false)
|
||||
}
|
||||
|
||||
if !r.Condition.CreatedBefore.IsZero() {
|
||||
rr.Condition.CreatedBefore = r.Condition.CreatedBefore.Format(rfc3339Date)
|
||||
}
|
||||
rl.Rule = append(rl.Rule, rr)
|
||||
}
|
||||
return &rl
|
||||
}
|
||||
|
||||
func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
	var l Lifecycle
	if rl == nil {
		return l
	}
	for _, rr := range rl.Rule {
		r := LifecycleRule{
			Action: LifecycleAction{
				Type:         rr.Action.Type,
				StorageClass: rr.Action.StorageClass,
			},
			Condition: LifecycleCondition{
				AgeInDays:             rr.Condition.Age,
				MatchesStorageClasses: rr.Condition.MatchesStorageClass,
				NumNewerVersions:      rr.Condition.NumNewerVersions,
			},
		}

		switch {
		case rr.Condition.IsLive == nil:
			r.Condition.Liveness = LiveAndArchived
		case *rr.Condition.IsLive:
			r.Condition.Liveness = Live
		default:
			r.Condition.Liveness = Archived
		}

		if rr.Condition.CreatedBefore != "" {
			r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore)
		}
		// Collect the converted rule.
		l.Rules = append(l.Rules, r)
	}
	return l
}
|
||||
|
||||
// Objects returns an iterator over the objects in the bucket that match the Query q.
// If q is nil, no filtering is done.
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
	it := &ObjectIterator{
		ctx:    ctx,
		bucket: b,
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		it.fetch,
		func() int { return len(it.items) },
		func() interface{} { b := it.items; it.items = nil; return b })
	if q != nil {
		it.query = *q
	}
	return it
}
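
// exampleListObjects is an illustrative sketch, not part of the vendored
// file: the canonical loop over an ObjectIterator, stopping on
// iterator.Done. The "logs/" prefix is an arbitrary example.
func exampleListObjects(ctx context.Context, b *BucketHandle) ([]string, error) {
	var names []string
	it := b.Objects(ctx, &Query{Prefix: "logs/"})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return nil, err
		}
		names = append(names, attrs.Name)
	}
	return names, nil
}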
|
||||
|
||||
// An ObjectIterator is an iterator over ObjectAttrs.
|
||||
type ObjectIterator struct {
|
||||
ctx context.Context
|
||||
bucket *BucketHandle
|
||||
query Query
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
items []*ObjectAttrs
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
|
||||
func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if
|
||||
// there are no more results. Once Next returns iterator.Done, all subsequent
|
||||
// calls will return iterator.Done.
|
||||
//
|
||||
// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will
|
||||
// have a non-empty Prefix field, and a zero value for all other fields. These
|
||||
// represent prefixes.
|
||||
func (it *ObjectIterator) Next() (*ObjectAttrs, error) {
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
item := it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) {
|
||||
req := it.bucket.c.raw.Objects.List(it.bucket.name)
|
||||
setClientHeader(req.Header())
|
||||
req.Projection("full")
|
||||
req.Delimiter(it.query.Delimiter)
|
||||
req.Prefix(it.query.Prefix)
|
||||
req.Versions(it.query.Versions)
|
||||
req.PageToken(pageToken)
|
||||
if it.bucket.userProject != "" {
|
||||
req.UserProject(it.bucket.userProject)
|
||||
}
|
||||
if pageSize > 0 {
|
||||
req.MaxResults(int64(pageSize))
|
||||
}
|
||||
var resp *raw.Objects
|
||||
var err error
|
||||
err = runWithRetry(it.ctx, func() error {
|
||||
resp, err = req.Context(it.ctx).Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
|
||||
err = ErrBucketNotExist
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
for _, item := range resp.Items {
|
||||
it.items = append(it.items, newObject(item))
|
||||
}
|
||||
for _, prefix := range resp.Prefixes {
|
||||
it.items = append(it.items, &ObjectAttrs{Prefix: prefix})
|
||||
}
|
||||
return resp.NextPageToken, nil
|
||||
}
|
||||
|
||||
// TODO(jbd): Add storage.buckets.update.
|
||||
|
||||
// Buckets returns an iterator over the buckets in the project. You may
|
||||
// optionally set the iterator's Prefix field to restrict the list to buckets
|
||||
// whose names begin with the prefix. By default, all buckets in the project
|
||||
// are returned.
|
||||
func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator {
|
||||
it := &BucketIterator{
|
||||
ctx: ctx,
|
||||
client: c,
|
||||
projectID: projectID,
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
||||
it.fetch,
|
||||
func() int { return len(it.buckets) },
|
||||
func() interface{} { b := it.buckets; it.buckets = nil; return b })
|
||||
return it
|
||||
}
|
||||
|
||||
// A BucketIterator is an iterator over BucketAttrs.
|
||||
type BucketIterator struct {
|
||||
// Prefix restricts the iterator to buckets whose names begin with it.
|
||||
Prefix string
|
||||
|
||||
ctx context.Context
|
||||
client *Client
|
||||
projectID string
|
||||
buckets []*BucketAttrs
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if
|
||||
// there are no more results. Once Next returns iterator.Done, all subsequent
|
||||
// calls will return iterator.Done.
|
||||
func (it *BucketIterator) Next() (*BucketAttrs, error) {
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b := it.buckets[0]
|
||||
it.buckets = it.buckets[1:]
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
|
||||
func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
|
||||
|
||||
func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error) {
	req := it.client.raw.Buckets.List(it.projectID)
	setClientHeader(req.Header())
	req.Projection("full")
	req.Prefix(it.Prefix)
	req.PageToken(pageToken)
	if pageSize > 0 {
		req.MaxResults(int64(pageSize))
	}
	var resp *raw.Buckets
	var err error
	err = runWithRetry(it.ctx, func() error {
		resp, err = req.Context(it.ctx).Do()
		return err
	})
	if err != nil {
		return "", err
	}
	for _, item := range resp.Items {
		it.buckets = append(it.buckets, newBucket(item))
	}
	return resp.NextPageToken, nil
}
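
// exampleListBuckets is an illustrative sketch, not part of the vendored
// file: iterate all buckets in a project whose names start with a prefix.
// The "prod-" prefix is an arbitrary example.
func exampleListBuckets(ctx context.Context, c *Client, projectID string) ([]string, error) {
	var names []string
	it := c.Buckets(ctx, projectID)
	it.Prefix = "prod-"
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return nil, err
		}
		names = append(names, attrs.Name)
	}
	return names, nil
}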
|
|
@ -0,0 +1,201 @@
|
|||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// CopierFrom creates a Copier that can copy src to dst.
|
||||
// You can immediately call Run on the returned Copier, or
|
||||
// you can configure it first.
|
||||
//
|
||||
// For Requester Pays buckets, the user project of dst is billed, unless it is empty,
|
||||
// in which case the user project of src is billed.
|
||||
func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier {
|
||||
return &Copier{dst: dst, src: src}
|
||||
}
|
||||
|
||||
// A Copier copies a source object to a destination.
|
||||
type Copier struct {
|
||||
// ObjectAttrs are optional attributes to set on the destination object.
|
||||
// Any attributes must be initialized before any calls on the Copier. Nil
|
||||
// or zero-valued attributes are ignored.
|
||||
ObjectAttrs
|
||||
|
||||
// RewriteToken can be set before calling Run to resume a copy
|
||||
// operation. After Run returns a non-nil error, RewriteToken will
|
||||
// have been updated to contain the value needed to resume the copy.
|
||||
RewriteToken string
|
||||
|
||||
// ProgressFunc can be used to monitor the progress of a multi-RPC copy
|
||||
// operation. If ProgressFunc is not nil and copying requires multiple
|
||||
// calls to the underlying service (see
|
||||
// https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then
|
||||
// ProgressFunc will be invoked after each call with the number of bytes of
|
||||
// content copied so far and the total size in bytes of the source object.
|
||||
//
|
||||
// ProgressFunc is intended to make upload progress available to the
|
||||
// application. For example, the implementation of ProgressFunc may update
|
||||
// a progress bar in the application's UI, or log the result of
|
||||
// float64(copiedBytes)/float64(totalBytes).
|
||||
//
|
||||
// ProgressFunc should return quickly without blocking.
|
||||
ProgressFunc func(copiedBytes, totalBytes uint64)
|
||||
|
||||
dst, src *ObjectHandle
|
||||
}
|
||||
|
||||
// Run performs the copy.
func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error) {
	if err := c.src.validate(); err != nil {
		return nil, err
	}
	if err := c.dst.validate(); err != nil {
		return nil, err
	}
	// Convert destination attributes to raw form, omitting the bucket.
	// If the bucket is included but name or content-type aren't, the service
	// returns a 400 with "Required" as the only message. Omitting the bucket
	// does not cause any problems.
	rawObject := c.ObjectAttrs.toRawObject("")
	for {
		res, err := c.callRewrite(ctx, rawObject)
		if err != nil {
			return nil, err
		}
		if c.ProgressFunc != nil {
			c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize))
		}
		if res.Done { // Finished successfully.
			return newObject(res.Resource), nil
		}
	}
}
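
// exampleCopyWithProgress is an illustrative sketch, not part of the vendored
// file: copy src to dst, logging progress for multi-RPC rewrites. The
// ContentType override is an arbitrary example of setting destination
// attributes on the Copier before Run.
func exampleCopyWithProgress(ctx context.Context, dst, src *ObjectHandle) (*ObjectAttrs, error) {
	c := dst.CopierFrom(src)
	c.ContentType = "text/plain"
	c.ProgressFunc = func(copied, total uint64) {
		fmt.Printf("copied %d of %d bytes\n", copied, total)
	}
	return c.Run(ctx)
}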
|
||||
|
||||
func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) {
|
||||
call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj)
|
||||
|
||||
call.Context(ctx).Projection("full")
|
||||
if c.RewriteToken != "" {
|
||||
call.RewriteToken(c.RewriteToken)
|
||||
}
|
||||
if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if c.dst.userProject != "" {
|
||||
call.UserProject(c.dst.userProject)
|
||||
} else if c.src.userProject != "" {
|
||||
call.UserProject(c.src.userProject)
|
||||
}
|
||||
if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var res *raw.RewriteResponse
|
||||
var err error
|
||||
setClientHeader(call.Header())
|
||||
err = runWithRetry(ctx, func() error { res, err = call.Do(); return err })
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.RewriteToken = res.RewriteToken
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// ComposerFrom creates a Composer that can compose srcs into dst.
|
||||
// You can immediately call Run on the returned Composer, or you can
|
||||
// configure it first.
|
||||
//
|
||||
// The encryption key for the destination object will be used to decrypt all
|
||||
// source objects and encrypt the destination object. It is an error
|
||||
// to specify an encryption key for any of the source objects.
|
||||
func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer {
|
||||
return &Composer{dst: dst, srcs: srcs}
|
||||
}
|
||||
|
||||
// A Composer composes source objects into a destination object.
|
||||
//
|
||||
// For Requester Pays buckets, the user project of dst is billed.
|
||||
type Composer struct {
|
||||
// ObjectAttrs are optional attributes to set on the destination object.
|
||||
// Any attributes must be initialized before any calls on the Composer. Nil
|
||||
// or zero-valued attributes are ignored.
|
||||
ObjectAttrs
|
||||
|
||||
dst *ObjectHandle
|
||||
srcs []*ObjectHandle
|
||||
}
|
||||
|
||||
// Run performs the compose operation.
|
||||
func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) {
|
||||
if err := c.dst.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(c.srcs) == 0 {
|
||||
return nil, errors.New("storage: at least one source object must be specified")
|
||||
}
|
||||
|
||||
req := &raw.ComposeRequest{}
|
||||
// Compose requires a non-empty Destination, so we always set it,
|
||||
// even if the caller-provided ObjectAttrs is the zero value.
|
||||
req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket)
|
||||
for _, src := range c.srcs {
|
||||
if err := src.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if src.bucket != c.dst.bucket {
|
||||
return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket)
|
||||
}
|
||||
if src.encryptionKey != nil {
|
||||
return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object)
|
||||
}
|
||||
srcObj := &raw.ComposeRequestSourceObjects{
|
||||
Name: src.object,
|
||||
}
|
||||
if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.SourceObjects = append(req.SourceObjects, srcObj)
|
||||
}
|
||||
|
||||
call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx)
|
||||
if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if c.dst.userProject != "" {
|
||||
call.UserProject(c.dst.userProject)
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var obj *raw.Object
|
||||
var err error
|
||||
setClientHeader(call.Header())
|
||||
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newObject(obj), nil
|
||||
}
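
// exampleCompose is an illustrative sketch, not part of the vendored file:
// concatenate several same-bucket objects into one destination object. The
// object names and content type are example values.
func exampleCompose(ctx context.Context, bkt *BucketHandle) (*ObjectAttrs, error) {
	dst := bkt.Object("combined")
	srcs := []*ObjectHandle{bkt.Object("part-1"), bkt.Object("part-2")}
	composer := dst.ComposerFrom(srcs...)
	composer.ContentType = "application/octet-stream"
	return composer.Run(ctx)
}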
|
|
@ -0,0 +1,161 @@
|
|||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package storage provides an easy way to work with Google Cloud Storage.
|
||||
Google Cloud Storage stores data in named objects, which are grouped into buckets.
|
||||
|
||||
More information about Google Cloud Storage is available at
|
||||
https://cloud.google.com/storage/docs.
|
||||
|
||||
All of the methods of this package use exponential backoff to retry calls
|
||||
that fail with certain errors, as described in
|
||||
https://cloud.google.com/storage/docs/exponential-backoff.
|
||||
|
||||
Note: This package is in beta. Some backwards-incompatible changes may occur.
|
||||
|
||||
|
||||
Creating a Client
|
||||
|
||||
To start working with this package, create a client:
|
||||
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
Buckets
|
||||
|
||||
A Google Cloud Storage bucket is a collection of objects. To work with a
|
||||
bucket, make a bucket handle:
|
||||
|
||||
bkt := client.Bucket(bucketName)
|
||||
|
||||
A handle is a reference to a bucket. You can have a handle even if the
|
||||
bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
|
||||
call Create on the handle:
|
||||
|
||||
if err := bkt.Create(ctx, projectID, nil); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
Note that although buckets are associated with projects, bucket names are
|
||||
global across all projects.
|
||||
|
||||
Each bucket has associated metadata, represented in this package by
BucketAttrs. The third argument to BucketHandle.Create allows you to set
the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
Attrs:
|
||||
|
||||
attrs, err := bkt.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
|
||||
attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
|
||||
|
||||
Objects
|
||||
|
||||
An object holds arbitrary data as a sequence of bytes, like a file. You
|
||||
refer to objects using a handle, just as with buckets. You can use the
|
||||
standard Go io.Reader and io.Writer interfaces to read and write
|
||||
object data:
|
||||
|
||||
obj := bkt.Object("data")
|
||||
// Write something to obj.
|
||||
// w implements io.Writer.
|
||||
w := obj.NewWriter(ctx)
|
||||
// Write some text to obj. This will overwrite whatever is there.
|
||||
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Close, just like writing a file.
|
||||
if err := w.Close(); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
// Read it back.
|
||||
r, err := obj.NewReader(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer r.Close()
|
||||
if _, err := io.Copy(os.Stdout, r); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Prints "This object contains text."
|
||||
|
||||
Objects also have attributes, which you can fetch with Attrs:
|
||||
|
||||
objAttrs, err := obj.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("object %s has size %d and can be read using %s\n",
|
||||
objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
|
||||
|
||||
ACLs
|
||||
|
||||
Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
|
||||
ACLRules, each of which specifies the role of a user, group or project. ACLs
|
||||
are suitable for fine-grained control, but you may prefer using IAM to control
|
||||
access at the project level (see
|
||||
https://cloud.google.com/storage/docs/access-control/iam).
|
||||
|
||||
To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method:
|
||||
|
||||
acls, err := obj.ACL().List(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
for _, rule := range acls {
|
||||
fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
|
||||
}
|
||||
|
||||
You can also set and delete ACLs.
|
||||
|
||||
Conditions
|
||||
|
||||
Every object has a generation and a metageneration. The generation changes
|
||||
whenever the content changes, and the metageneration changes whenever the
|
||||
metadata changes. Conditions let you check these values before an operation;
|
||||
the operation only executes if the conditions match. You can use conditions to
|
||||
prevent race conditions in read-modify-write operations.
|
||||
|
||||
For example, say you've read an object's metadata into objAttrs. Now
|
||||
you want to write to that object, but only if its contents haven't changed
|
||||
since you read it. Here is how to express that:
|
||||
|
||||
w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
|
||||
// Proceed with writing as above.
|
||||
|
||||
Signed URLs
|
||||
|
||||
You can obtain a URL that lets anyone read or write an object for a limited time.
|
||||
You don't need to create a client to do this. See the documentation of
|
||||
SignedURL for details.
|
||||
|
||||
url, err := storage.SignedURL(bucketName, "shared-object", opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(url)
|
||||
|
||||
Authentication
|
||||
|
||||
See examples of authorization and authentication at
|
||||
https://godoc.org/cloud.google.com/go#pkg-examples.
|
||||
*/
|
||||
package storage // import "cloud.google.com/go/storage"
|
|
@ -0,0 +1,30 @@
|
|||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build go1.10
|
||||
|
||||
package storage
|
||||
|
||||
import "google.golang.org/api/googleapi"
|
||||
|
||||
func shouldRetry(err error) bool {
|
||||
switch e := err.(type) {
|
||||
case *googleapi.Error:
|
||||
// Retry on 429 and 5xx, according to
|
||||
// https://cloud.google.com/storage/docs/exponential-backoff.
|
||||
return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build go1.7
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func withContext(r *http.Request, ctx context.Context) *http.Request {
|
||||
return r.WithContext(ctx)
|
||||
}
|
|
@ -0,0 +1,108 @@
|
|||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"cloud.google.com/go/iam"
|
||||
"golang.org/x/net/context"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
iampb "google.golang.org/genproto/googleapis/iam/v1"
|
||||
)
|
||||
|
||||
// IAM provides access to IAM access control for the bucket.
|
||||
func (b *BucketHandle) IAM() *iam.Handle {
|
||||
return iam.InternalNewHandleClient(&iamClient{raw: b.c.raw}, b.name)
|
||||
}
|
||||
|
||||
// iamClient implements the iam.client interface.
|
||||
type iamClient struct {
|
||||
raw *raw.Service
|
||||
}
|
||||
|
||||
func (c *iamClient) Get(ctx context.Context, resource string) (*iampb.Policy, error) {
|
||||
req := c.raw.Buckets.GetIamPolicy(resource)
|
||||
setClientHeader(req.Header())
|
||||
var rp *raw.Policy
|
||||
var err error
|
||||
err = runWithRetry(ctx, func() error {
|
||||
rp, err = req.Context(ctx).Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return iamFromStoragePolicy(rp), nil
|
||||
}
|
||||
|
||||
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) error {
|
||||
rp := iamToStoragePolicy(p)
|
||||
req := c.raw.Buckets.SetIamPolicy(resource, rp)
|
||||
setClientHeader(req.Header())
|
||||
return runWithRetry(ctx, func() error {
|
||||
_, err := req.Context(ctx).Do()
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
|
||||
req := c.raw.Buckets.TestIamPermissions(resource, perms)
|
||||
setClientHeader(req.Header())
|
||||
var res *raw.TestIamPermissionsResponse
|
||||
var err error
|
||||
err = runWithRetry(ctx, func() error {
|
||||
res, err = req.Context(ctx).Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.Permissions, nil
|
||||
}
|
||||
|
||||
func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy {
|
||||
return &raw.Policy{
|
||||
Bindings: iamToStorageBindings(ip.Bindings),
|
||||
Etag: string(ip.Etag),
|
||||
}
|
||||
}
|
||||
|
||||
func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings {
|
||||
var rbs []*raw.PolicyBindings
|
||||
for _, ib := range ibs {
|
||||
rbs = append(rbs, &raw.PolicyBindings{
|
||||
Role: ib.Role,
|
||||
Members: ib.Members,
|
||||
})
|
||||
}
|
||||
return rbs
|
||||
}
|
||||
|
||||
func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy {
|
||||
return &iampb.Policy{
|
||||
Bindings: iamFromStorageBindings(rp.Bindings),
|
||||
Etag: []byte(rp.Etag),
|
||||
}
|
||||
}
|
||||
|
||||
func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding {
|
||||
var ibs []*iampb.Binding
|
||||
for _, rb := range rbs {
|
||||
ibs = append(ibs, &iampb.Binding{
|
||||
Role: rb.Role,
|
||||
Members: rb.Members,
|
||||
})
|
||||
}
|
||||
return ibs
|
||||
}
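
// exampleGrantObjectViewer is an illustrative sketch, not part of the
// vendored file: a read-modify-write of the bucket IAM policy through the
// iam.Handle returned by BucketHandle.IAM. It assumes the Policy, Add and
// SetPolicy methods of the cloud.google.com/go/iam package; the role string
// and member are example values.
func exampleGrantObjectViewer(ctx context.Context, b *BucketHandle, member string) error {
	policy, err := b.IAM().Policy(ctx)
	if err != nil {
		return err
	}
	policy.Add(member, "roles/storage.objectViewer")
	return b.IAM().SetPolicy(ctx, policy)
}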
|
|
@ -0,0 +1,36 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"cloud.google.com/go/internal"
|
||||
gax "github.com/googleapis/gax-go"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
func runWithRetry(ctx context.Context, call func() error) error {
	return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
		err = call()
		if err == nil {
			return true, nil
		}
		if shouldRetry(err) {
			return false, nil
		}
		return true, err
	})
}
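
// exampleRetryPattern is an illustrative sketch, not part of the vendored
// file: the calling convention used throughout this package. Capture the
// result in a closure variable and let runWithRetry re-invoke the closure on
// retryable errors until it succeeds, fails permanently, or ctx is done.
// The do parameter is a hypothetical stand-in for a raw API call.
func exampleRetryPattern(ctx context.Context, do func() (string, error)) (string, error) {
	var result string
	err := runWithRetry(ctx, func() error {
		var err error
		result, err = do()
		return err
	})
	return result, err
}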
|
|
@ -0,0 +1,40 @@
|
|||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !go1.10
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
func shouldRetry(err error) bool {
|
||||
switch e := err.(type) {
|
||||
case *googleapi.Error:
|
||||
// Retry on 429 and 5xx, according to
|
||||
// https://cloud.google.com/storage/docs/exponential-backoff.
|
||||
return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
|
||||
case *url.Error:
|
||||
// Retry on REFUSED_STREAM.
|
||||
// Unfortunately the error type is unexported, so we resort to string
|
||||
// matching.
|
||||
return strings.Contains(e.Error(), "REFUSED_STREAM")
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !go1.7
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func withContext(r *http.Request, _ interface{}) *http.Request {
|
||||
// In Go 1.6 and below, ignore the context.
|
||||
return r
|
||||
}
|
|
@ -0,0 +1,80 @@
|
|||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
)
|
||||
|
||||
var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
|
||||
|
||||
// Reader reads a Cloud Storage object.
|
||||
// It implements io.Reader.
|
||||
type Reader struct {
|
||||
body io.ReadCloser
|
||||
remain, size int64
|
||||
contentType string
|
||||
cacheControl string
|
||||
checkCRC bool // should we check the CRC?
|
||||
wantCRC uint32 // the CRC32c value the server sent in the header
|
||||
gotCRC uint32 // running crc
|
||||
}
|
||||
|
||||
// Close closes the Reader. It must be called when done reading.
|
||||
func (r *Reader) Close() error {
|
||||
return r.body.Close()
|
||||
}
|
||||
|
||||
func (r *Reader) Read(p []byte) (int, error) {
|
||||
n, err := r.body.Read(p)
|
||||
if r.remain != -1 {
|
||||
r.remain -= int64(n)
|
||||
}
|
||||
if r.checkCRC {
|
||||
r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n])
|
||||
// Check CRC here. It would be natural to check it in Close, but
|
||||
// everybody defers Close on the assumption that it doesn't return
|
||||
// anything worth looking at.
|
||||
if r.remain == 0 && r.gotCRC != r.wantCRC {
|
||||
return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
|
||||
r.gotCRC, r.wantCRC)
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Size returns the size of the object in bytes.
|
||||
// The returned value is always the same and is not affected by
|
||||
// calls to Read or Close.
|
||||
func (r *Reader) Size() int64 {
|
||||
return r.size
|
||||
}
|
||||
|
||||
// Remain returns the number of bytes left to read, or -1 if unknown.
|
||||
func (r *Reader) Remain() int64 {
|
||||
return r.remain
|
||||
}
|
||||
|
||||
// ContentType returns the content type of the object.
|
||||
func (r *Reader) ContentType() string {
|
||||
return r.contentType
|
||||
}
|
||||
|
||||
// CacheControl returns the cache control of the object.
|
||||
func (r *Reader) CacheControl() string {
|
||||
return r.cacheControl
|
||||
}
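
// exampleReadAll is an illustrative sketch, not part of the vendored file:
// drain a Reader into dst while reporting the object's size and content
// type. The CRC check in Read fires automatically on the final byte when the
// server sent a CRC32C header.
func exampleReadAll(r *Reader, dst io.Writer) error {
	defer r.Close()
	fmt.Printf("reading %d bytes (content type %q)\n", r.Size(), r.ContentType())
	_, err := io.Copy(dst, r)
	return err
}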
|
File diff suppressed because it is too large
|
@ -0,0 +1,201 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/googleapi"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// A Writer writes a Cloud Storage object.
|
||||
type Writer struct {
|
||||
// ObjectAttrs are optional attributes to set on the object. Any attributes
|
||||
// must be initialized before the first Write call. Nil or zero-valued
|
||||
// attributes are ignored.
|
||||
ObjectAttrs
|
||||
|
||||
	// SendCRC32C specifies whether to transmit a CRC32C field. It should be set
	// to true in addition to setting the Writer's CRC32C field, because zero
	// is a valid CRC and normally a zero would not be transmitted.
	// If a CRC32C is sent, and the data written does not match the checksum,
	// the write will be rejected.
	SendCRC32C bool
|
||||
|
||||
// ChunkSize controls the maximum number of bytes of the object that the
|
||||
// Writer will attempt to send to the server in a single request. Objects
|
||||
// smaller than the size will be sent in a single request, while larger
|
||||
// objects will be split over multiple requests. The size will be rounded up
|
||||
// to the nearest multiple of 256K. If zero, chunking will be disabled and
|
||||
// the object will be uploaded in a single request.
|
||||
//
|
||||
// ChunkSize will default to a reasonable value. Any custom configuration
|
||||
// must be done before the first Write call.
|
||||
ChunkSize int
|
||||
|
||||
	// ProgressFunc can be used to monitor the progress of a large write
	// operation. If ProgressFunc is not nil and writing requires multiple
	// calls to the underlying service (see
	// https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload),
	// then ProgressFunc will be invoked after each call with the number of bytes of
	// content copied so far.
	//
	// ProgressFunc should return quickly without blocking.
	ProgressFunc func(int64)
|
||||
|
||||
ctx context.Context
|
||||
o *ObjectHandle
|
||||
|
||||
opened bool
|
||||
pw *io.PipeWriter
|
||||
|
||||
donec chan struct{} // closed after err and obj are set.
|
||||
err error
|
||||
obj *ObjectAttrs
|
||||
}
|
||||
|
||||
func (w *Writer) open() error {
|
||||
attrs := w.ObjectAttrs
|
||||
// Check the developer didn't change the object Name (this is unfortunate, but
|
||||
// we don't want to store an object under the wrong name).
|
||||
if attrs.Name != w.o.object {
|
||||
return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object)
|
||||
}
|
||||
if !utf8.ValidString(attrs.Name) {
|
||||
return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
|
||||
}
|
||||
pr, pw := io.Pipe()
|
||||
w.pw = pw
|
||||
w.opened = true
|
||||
|
||||
if w.ChunkSize < 0 {
|
||||
return errors.New("storage: Writer.ChunkSize must be non-negative")
|
||||
}
|
||||
mediaOpts := []googleapi.MediaOption{
|
||||
googleapi.ChunkSize(w.ChunkSize),
|
||||
}
|
||||
if c := attrs.ContentType; c != "" {
|
||||
mediaOpts = append(mediaOpts, googleapi.ContentType(c))
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer close(w.donec)
|
||||
|
||||
rawObj := attrs.toRawObject(w.o.bucket)
|
||||
if w.SendCRC32C {
|
||||
rawObj.Crc32c = encodeUint32(attrs.CRC32C)
|
||||
}
|
||||
if w.MD5 != nil {
|
||||
rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5)
|
||||
}
|
||||
call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj).
|
||||
Media(pr, mediaOpts...).
|
||||
Projection("full").
|
||||
Context(w.ctx)
|
||||
if w.ProgressFunc != nil {
|
||||
call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
|
||||
w.err = err
|
||||
pr.CloseWithError(w.err)
|
||||
return
|
||||
}
|
||||
var resp *raw.Object
|
||||
err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
|
||||
if err == nil {
|
||||
if w.o.userProject != "" {
|
||||
call.UserProject(w.o.userProject)
|
||||
}
|
||||
setClientHeader(call.Header())
|
||||
// If the chunk size is zero, then no chunking is done on the Reader,
|
||||
// which means we cannot retry: the first call will read the data, and if
|
||||
// it fails, there is no way to re-read.
|
||||
if w.ChunkSize == 0 {
|
||||
resp, err = call.Do()
|
||||
} else {
|
||||
// We will only retry here if the initial POST, which obtains a URI for
|
||||
// the resumable upload, fails with a retryable error. The upload itself
|
||||
// has its own retry logic.
|
||||
err = runWithRetry(w.ctx, func() error {
|
||||
var err2 error
|
||||
resp, err2 = call.Do()
|
||||
return err2
|
||||
})
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
w.err = err
|
||||
pr.CloseWithError(w.err)
|
||||
return
|
||||
}
|
||||
w.obj = newObject(resp)
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write appends to w. It implements the io.Writer interface.
|
||||
//
|
||||
// Since writes happen asynchronously, Write may return a nil
|
||||
// error even though the write failed (or will fail). Always
|
||||
// use the error returned from Writer.Close to determine if
|
||||
// the upload was successful.
|
||||
func (w *Writer) Write(p []byte) (n int, err error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
if !w.opened {
|
||||
if err := w.open(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return w.pw.Write(p)
|
||||
}
|
||||
|
||||
// Close completes the write operation and flushes any buffered data.
|
||||
// If Close doesn't return an error, metadata about the written object
|
||||
// can be retrieved by calling Attrs.
|
||||
func (w *Writer) Close() error {
|
||||
if !w.opened {
|
||||
if err := w.open(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := w.pw.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
<-w.donec
|
||||
return w.err
|
||||
}
|
||||
|
||||
// CloseWithError aborts the write operation with the provided error.
|
||||
// CloseWithError always returns nil.
|
||||
func (w *Writer) CloseWithError(err error) error {
|
||||
if !w.opened {
|
||||
return nil
|
||||
}
|
||||
return w.pw.CloseWithError(err)
|
||||
}
|
||||
|
||||
// Attrs returns metadata about a successfully-written object.
|
||||
// It's only valid to call it after Close returns nil.
|
||||
func (w *Writer) Attrs() *ObjectAttrs {
|
||||
return w.obj
|
||||
}
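
// exampleUpload is an illustrative sketch, not part of the vendored file:
// a resumable upload with progress reporting. A non-zero ChunkSize keeps the
// initial POST retryable (see open above); Close must be checked because
// Write errors surface asynchronously. The content type and chunk size are
// example values.
func exampleUpload(ctx context.Context, obj *ObjectHandle, src io.Reader) (*ObjectAttrs, error) {
	w := obj.NewWriter(ctx)
	w.ContentType = "application/octet-stream"
	w.ChunkSize = 8 << 20 // 8 MiB per request, rounded up to a 256K multiple
	w.ProgressFunc = func(written int64) {
		fmt.Printf("uploaded %d bytes\n", written)
	}
	if _, err := io.Copy(w, src); err != nil {
		w.CloseWithError(err)
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	return w.Attrs(), nil
}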
|
37  vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile (generated, vendored, new file)
|
@ -0,0 +1,37 @@
|
|||
# Go support for Protocol Buffers - Google's data interchange format
|
||||
#
|
||||
# Copyright 2010 The Go Authors. All rights reserved.
|
||||
# https://github.com/golang/protobuf
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/
|
||||
# at src/google/protobuf/descriptor.proto
|
||||
regenerate:
|
||||
@echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION
|
||||
cp $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto .
|
||||
protoc --go_out=../../../../.. -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto
|
2215  vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go (generated, vendored, new file) — diff suppressed because it is too large
849  vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto (generated, vendored, new file)
|
@ -0,0 +1,849 @@
|
|||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Author: kenton@google.com (Kenton Varda)
|
||||
// Based on original Protocol Buffers design by
|
||||
// Sanjay Ghemawat, Jeff Dean, and others.
|
||||
//
|
||||
// The messages in this file describe the definitions found in .proto files.
|
||||
// A valid .proto file can be translated directly to a FileDescriptorProto
|
||||
// without any other information (e.g. without reading its imports).
|
||||
|
||||
|
||||
syntax = "proto2";
|
||||
|
||||
package google.protobuf;
|
||||
option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "DescriptorProtos";
|
||||
option csharp_namespace = "Google.Protobuf.Reflection";
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// descriptor.proto must be optimized for speed because reflection-based
|
||||
// algorithms don't work during bootstrapping.
|
||||
option optimize_for = SPEED;
|
||||
|
||||
// The protocol compiler can output a FileDescriptorSet containing the .proto
|
||||
// files it parses.
|
||||
message FileDescriptorSet {
|
||||
repeated FileDescriptorProto file = 1;
|
||||
}
|
||||
|
||||
// Describes a complete .proto file.
|
||||
message FileDescriptorProto {
|
||||
optional string name = 1; // file name, relative to root of source tree
|
||||
optional string package = 2; // e.g. "foo", "foo.bar", etc.
|
||||
|
||||
// Names of files imported by this file.
|
||||
repeated string dependency = 3;
|
||||
// Indexes of the public imported files in the dependency list above.
|
||||
repeated int32 public_dependency = 10;
|
||||
// Indexes of the weak imported files in the dependency list.
|
||||
// For Google-internal migration only. Do not use.
|
||||
repeated int32 weak_dependency = 11;
|
||||
|
||||
// All top-level definitions in this file.
|
||||
repeated DescriptorProto message_type = 4;
|
||||
repeated EnumDescriptorProto enum_type = 5;
|
||||
repeated ServiceDescriptorProto service = 6;
|
||||
repeated FieldDescriptorProto extension = 7;
|
||||
|
||||
optional FileOptions options = 8;
|
||||
|
||||
// This field contains optional information about the original source code.
|
||||
// You may safely remove this entire field without harming runtime
|
||||
// functionality of the descriptors -- the information is needed only by
|
||||
// development tools.
|
||||
optional SourceCodeInfo source_code_info = 9;
|
||||
|
||||
// The syntax of the proto file.
|
||||
// The supported values are "proto2" and "proto3".
|
||||
optional string syntax = 12;
|
||||
}
|
||||
|
||||
// Describes a message type.
|
||||
message DescriptorProto {
|
||||
optional string name = 1;
|
||||
|
||||
repeated FieldDescriptorProto field = 2;
|
||||
repeated FieldDescriptorProto extension = 6;
|
||||
|
||||
repeated DescriptorProto nested_type = 3;
|
||||
repeated EnumDescriptorProto enum_type = 4;
|
||||
|
||||
message ExtensionRange {
|
||||
optional int32 start = 1;
|
||||
optional int32 end = 2;
|
||||
|
||||
optional ExtensionRangeOptions options = 3;
|
||||
}
|
||||
repeated ExtensionRange extension_range = 5;
|
||||
|
||||
repeated OneofDescriptorProto oneof_decl = 8;
|
||||
|
||||
optional MessageOptions options = 7;
|
||||
|
||||
// Range of reserved tag numbers. Reserved tag numbers may not be used by
|
||||
// fields or extension ranges in the same message. Reserved ranges may
|
||||
// not overlap.
|
||||
message ReservedRange {
|
||||
optional int32 start = 1; // Inclusive.
|
||||
optional int32 end = 2; // Exclusive.
|
||||
}
|
||||
repeated ReservedRange reserved_range = 9;
|
||||
// Reserved field names, which may not be used by fields in the same message.
|
||||
// A given name may only be reserved once.
|
||||
repeated string reserved_name = 10;
|
||||
}
|
||||
|
||||
message ExtensionRangeOptions {
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
// Describes a field within a message.
|
||||
message FieldDescriptorProto {
|
||||
enum Type {
|
||||
// 0 is reserved for errors.
|
||||
// Order is weird for historical reasons.
|
||||
TYPE_DOUBLE = 1;
|
||||
TYPE_FLOAT = 2;
|
||||
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
|
||||
// negative values are likely.
|
||||
TYPE_INT64 = 3;
|
||||
TYPE_UINT64 = 4;
|
||||
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
|
||||
// negative values are likely.
|
||||
TYPE_INT32 = 5;
|
||||
TYPE_FIXED64 = 6;
|
||||
TYPE_FIXED32 = 7;
|
||||
TYPE_BOOL = 8;
|
||||
TYPE_STRING = 9;
|
||||
// Tag-delimited aggregate.
|
||||
// Group type is deprecated and not supported in proto3. However, Proto3
|
||||
// implementations should still be able to parse the group wire format and
|
||||
// treat group fields as unknown fields.
|
||||
TYPE_GROUP = 10;
|
||||
TYPE_MESSAGE = 11; // Length-delimited aggregate.
|
||||
|
||||
// New in version 2.
|
||||
TYPE_BYTES = 12;
|
||||
TYPE_UINT32 = 13;
|
||||
TYPE_ENUM = 14;
|
||||
TYPE_SFIXED32 = 15;
|
||||
TYPE_SFIXED64 = 16;
|
||||
TYPE_SINT32 = 17; // Uses ZigZag encoding.
|
||||
TYPE_SINT64 = 18; // Uses ZigZag encoding.
|
||||
};
|
||||
|
||||
enum Label {
|
||||
// 0 is reserved for errors
|
||||
LABEL_OPTIONAL = 1;
|
||||
LABEL_REQUIRED = 2;
|
||||
LABEL_REPEATED = 3;
|
||||
};
|
||||
|
||||
optional string name = 1;
|
||||
optional int32 number = 3;
|
||||
optional Label label = 4;
|
||||
|
||||
// If type_name is set, this need not be set. If both this and type_name
|
||||
// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
|
||||
optional Type type = 5;
|
||||
|
||||
// For message and enum types, this is the name of the type. If the name
|
||||
// starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
|
||||
// rules are used to find the type (i.e. first the nested types within this
|
||||
// message are searched, then within the parent, on up to the root
|
||||
// namespace).
|
||||
optional string type_name = 6;
|
||||
|
||||
// For extensions, this is the name of the type being extended. It is
|
||||
// resolved in the same manner as type_name.
|
||||
optional string extendee = 2;
|
||||
|
||||
// For numeric types, contains the original text representation of the value.
|
||||
// For booleans, "true" or "false".
|
||||
// For strings, contains the default text contents (not escaped in any way).
|
||||
// For bytes, contains the C escaped value. All bytes >= 128 are escaped.
|
||||
// TODO(kenton): Base-64 encode?
|
||||
optional string default_value = 7;
|
||||
|
||||
// If set, gives the index of a oneof in the containing type's oneof_decl
|
||||
// list. This field is a member of that oneof.
|
||||
optional int32 oneof_index = 9;
|
||||
|
||||
// JSON name of this field. The value is set by protocol compiler. If the
|
||||
// user has set a "json_name" option on this field, that option's value
|
||||
// will be used. Otherwise, it's deduced from the field's name by converting
|
||||
// it to camelCase.
|
||||
optional string json_name = 10;
|
||||
|
||||
optional FieldOptions options = 8;
|
||||
}
|
||||
|
||||
// Describes a oneof.
|
||||
message OneofDescriptorProto {
|
||||
optional string name = 1;
|
||||
optional OneofOptions options = 2;
|
||||
}
|
||||
|
||||
// Describes an enum type.
|
||||
message EnumDescriptorProto {
|
||||
optional string name = 1;
|
||||
|
||||
repeated EnumValueDescriptorProto value = 2;
|
||||
|
||||
optional EnumOptions options = 3;
|
||||
}
|
||||
|
||||
// Describes a value within an enum.
|
||||
message EnumValueDescriptorProto {
|
||||
optional string name = 1;
|
||||
optional int32 number = 2;
|
||||
|
||||
optional EnumValueOptions options = 3;
|
||||
}
|
||||
|
||||
// Describes a service.
|
||||
message ServiceDescriptorProto {
|
||||
optional string name = 1;
|
||||
repeated MethodDescriptorProto method = 2;
|
||||
|
||||
optional ServiceOptions options = 3;
|
||||
}
|
||||
|
||||
// Describes a method of a service.
|
||||
message MethodDescriptorProto {
|
||||
optional string name = 1;
|
||||
|
||||
// Input and output type names. These are resolved in the same way as
|
||||
// FieldDescriptorProto.type_name, but must refer to a message type.
|
||||
optional string input_type = 2;
|
||||
optional string output_type = 3;
|
||||
|
||||
optional MethodOptions options = 4;
|
||||
|
||||
// Identifies if client streams multiple client messages
|
||||
optional bool client_streaming = 5 [default=false];
|
||||
// Identifies if server streams multiple server messages
|
||||
optional bool server_streaming = 6 [default=false];
|
||||
}
|
||||
|
||||
|
||||
// ===================================================================
|
||||
// Options
|
||||
|
||||
// Each of the definitions above may have "options" attached. These are
|
||||
// just annotations which may cause code to be generated slightly differently
|
||||
// or may contain hints for code that manipulates protocol messages.
|
||||
//
|
||||
// Clients may define custom options as extensions of the *Options messages.
|
||||
// These extensions may not yet be known at parsing time, so the parser cannot
|
||||
// store the values in them. Instead it stores them in a field in the *Options
|
||||
// message called uninterpreted_option. This field must have the same name
|
||||
// across all *Options messages. We then use this field to populate the
|
||||
// extensions when we build a descriptor, at which point all protos have been
|
||||
// parsed and so all extensions are known.
|
||||
//
|
||||
// Extension numbers for custom options may be chosen as follows:
|
||||
// * For options which will only be used within a single application or
|
||||
// organization, or for experimental options, use field numbers 50000
|
||||
// through 99999. It is up to you to ensure that you do not use the
|
||||
// same number for multiple options.
|
||||
// * For options which will be published and used publicly by multiple
|
||||
// independent entities, e-mail protobuf-global-extension-registry@google.com
|
||||
// to reserve extension numbers. Simply provide your project name (e.g.
|
||||
// Objective-C plugin) and your project website (if available) -- there's no
|
||||
// need to explain how you intend to use them. Usually you only need one
|
||||
// extension number. You can declare multiple options with only one extension
|
||||
// number by putting them in a sub-message. See the Custom Options section of
|
||||
// the docs for examples:
|
||||
// https://developers.google.com/protocol-buffers/docs/proto#options
|
||||
// If this turns out to be popular, a web service will be set up
|
||||
// to automatically assign option numbers.
|
||||
|
||||
|
||||
message FileOptions {
|
||||
|
||||
// Sets the Java package where classes generated from this .proto will be
|
||||
// placed. By default, the proto package is used, but this is often
|
||||
// inappropriate because proto packages do not normally start with backwards
|
||||
// domain names.
|
||||
optional string java_package = 1;
|
||||
|
||||
|
||||
// If set, all the classes from the .proto file are wrapped in a single
|
||||
// outer class with the given name. This applies to both Proto1
|
||||
// (equivalent to the old "--one_java_file" option) and Proto2 (where
|
||||
// a .proto always translates to a single class, but you may want to
|
||||
// explicitly choose the class name).
|
||||
optional string java_outer_classname = 8;
|
||||
|
||||
// If set true, then the Java code generator will generate a separate .java
|
||||
// file for each top-level message, enum, and service defined in the .proto
|
||||
// file. Thus, these types will *not* be nested inside the outer class
|
||||
// named by java_outer_classname. However, the outer class will still be
|
||||
// generated to contain the file's getDescriptor() method as well as any
|
||||
// top-level extensions defined in the file.
|
||||
optional bool java_multiple_files = 10 [default=false];
|
||||
|
||||
// This option does nothing.
|
||||
optional bool java_generate_equals_and_hash = 20 [deprecated=true];
|
||||
|
||||
// If set true, then the Java2 code generator will generate code that
|
||||
// throws an exception whenever an attempt is made to assign a non-UTF-8
|
||||
// byte sequence to a string field.
|
||||
// Message reflection will do the same.
|
||||
// However, an extension field still accepts non-UTF-8 byte sequences.
|
||||
// This option has no effect on when used with the lite runtime.
|
||||
optional bool java_string_check_utf8 = 27 [default=false];
|
||||
|
||||
|
||||
// Generated classes can be optimized for speed or code size.
|
||||
enum OptimizeMode {
|
||||
SPEED = 1; // Generate complete code for parsing, serialization,
|
||||
// etc.
|
||||
CODE_SIZE = 2; // Use ReflectionOps to implement these methods.
|
||||
LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
|
||||
}
|
||||
optional OptimizeMode optimize_for = 9 [default=SPEED];
|
||||
|
||||
// Sets the Go package where structs generated from this .proto will be
|
||||
// placed. If omitted, the Go package will be derived from the following:
|
||||
// - The basename of the package import path, if provided.
|
||||
// - Otherwise, the package statement in the .proto file, if present.
|
||||
// - Otherwise, the basename of the .proto file, without extension.
|
||||
optional string go_package = 11;
|
||||
|
||||
|
||||
|
||||
// Should generic services be generated in each language? "Generic" services
|
||||
// are not specific to any particular RPC system. They are generated by the
|
||||
// main code generators in each language (without additional plugins).
|
||||
// Generic services were the only kind of service generation supported by
|
||||
// early versions of google.protobuf.
|
||||
//
|
||||
// Generic services are now considered deprecated in favor of using plugins
|
||||
// that generate code specific to your particular RPC system. Therefore,
|
||||
// these default to false. Old code which depends on generic services should
|
||||
// explicitly set them to true.
|
||||
optional bool cc_generic_services = 16 [default=false];
|
||||
optional bool java_generic_services = 17 [default=false];
|
||||
optional bool py_generic_services = 18 [default=false];
|
||||
optional bool php_generic_services = 42 [default=false];
|
||||
|
||||
// Is this file deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for everything in the file, or it will be completely ignored; in the very
|
||||
// least, this is a formalization for deprecating files.
|
||||
optional bool deprecated = 23 [default=false];
|
||||
|
||||
// Enables the use of arenas for the proto messages in this file. This applies
|
||||
// only to generated classes for C++.
|
||||
optional bool cc_enable_arenas = 31 [default=false];
|
||||
|
||||
|
||||
// Sets the objective c class prefix which is prepended to all objective c
|
||||
// generated classes from this .proto. There is no default.
|
||||
optional string objc_class_prefix = 36;
|
||||
|
||||
// Namespace for generated classes; defaults to the package.
|
||||
optional string csharp_namespace = 37;
|
||||
|
||||
// By default Swift generators will take the proto package and CamelCase it
|
||||
// replacing '.' with underscore and use that to prefix the types/symbols
|
||||
// defined. When this options is provided, they will use this value instead
|
||||
// to prefix the types/symbols defined.
|
||||
optional string swift_prefix = 39;
|
||||
|
||||
// Sets the php class prefix which is prepended to all php generated classes
|
||||
// from this .proto. Default is empty.
|
||||
optional string php_class_prefix = 40;
|
||||
|
||||
// Use this option to change the namespace of php generated classes. Default
|
||||
// is empty. When this option is empty, the package name will be used for
|
||||
// determining the namespace.
|
||||
optional string php_namespace = 41;
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
|
||||
reserved 38;
|
||||
}
|
||||
|
||||
message MessageOptions {
|
||||
// Set true to use the old proto1 MessageSet wire format for extensions.
|
||||
// This is provided for backwards-compatibility with the MessageSet wire
|
||||
// format. You should not use this for any other reason: It's less
|
||||
// efficient, has fewer features, and is more complicated.
|
||||
//
|
||||
// The message must be defined exactly as follows:
|
||||
// message Foo {
|
||||
// option message_set_wire_format = true;
|
||||
// extensions 4 to max;
|
||||
// }
|
||||
// Note that the message cannot have any defined fields; MessageSets only
|
||||
// have extensions.
|
||||
//
|
||||
// All extensions of your type must be singular messages; e.g. they cannot
|
||||
// be int32s, enums, or repeated messages.
|
||||
//
|
||||
// Because this is an option, the above two restrictions are not enforced by
|
||||
// the protocol compiler.
|
||||
optional bool message_set_wire_format = 1 [default=false];
|
||||
|
||||
// Disables the generation of the standard "descriptor()" accessor, which can
|
||||
// conflict with a field of the same name. This is meant to make migration
|
||||
// from proto1 easier; new code should avoid fields named "descriptor".
|
||||
optional bool no_standard_descriptor_accessor = 2 [default=false];
|
||||
|
||||
// Is this message deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for the message, or it will be completely ignored; in the very least,
|
||||
// this is a formalization for deprecating messages.
|
||||
optional bool deprecated = 3 [default=false];
|
||||
|
||||
// Whether the message is an automatically generated map entry type for the
|
||||
// maps field.
|
||||
//
|
||||
// For maps fields:
|
||||
// map<KeyType, ValueType> map_field = 1;
|
||||
// The parsed descriptor looks like:
|
||||
// message MapFieldEntry {
|
||||
// option map_entry = true;
|
||||
// optional KeyType key = 1;
|
||||
// optional ValueType value = 2;
|
||||
// }
|
||||
// repeated MapFieldEntry map_field = 1;
|
||||
//
|
||||
// Implementations may choose not to generate the map_entry=true message, but
|
||||
// use a native map in the target language to hold the keys and values.
|
||||
// The reflection APIs in such implementions still need to work as
|
||||
// if the field is a repeated message field.
|
||||
//
|
||||
// NOTE: Do not set the option in .proto files. Always use the maps syntax
|
||||
// instead. The option should only be implicitly set by the proto compiler
|
||||
// parser.
|
||||
optional bool map_entry = 7;
|
||||
|
||||
reserved 8; // javalite_serializable
|
||||
reserved 9; // javanano_as_lite
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message FieldOptions {
|
||||
// The ctype option instructs the C++ code generator to use a different
|
||||
// representation of the field than it normally would. See the specific
|
||||
// options below. This option is not yet implemented in the open source
|
||||
// release -- sorry, we'll try to include it in a future version!
|
||||
optional CType ctype = 1 [default = STRING];
|
||||
enum CType {
|
||||
// Default mode.
|
||||
STRING = 0;
|
||||
|
||||
CORD = 1;
|
||||
|
||||
STRING_PIECE = 2;
|
||||
}
|
||||
// The packed option can be enabled for repeated primitive fields to enable
|
||||
// a more efficient representation on the wire. Rather than repeatedly
|
||||
// writing the tag and type for each element, the entire array is encoded as
|
||||
// a single length-delimited blob. In proto3, only explicit setting it to
|
||||
// false will avoid using packed encoding.
|
||||
optional bool packed = 2;
|
||||
|
||||
// The jstype option determines the JavaScript type used for values of the
|
||||
// field. The option is permitted only for 64 bit integral and fixed types
|
||||
// (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
|
||||
// is represented as JavaScript string, which avoids loss of precision that
|
||||
// can happen when a large value is converted to a floating point JavaScript.
|
||||
// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
|
||||
// use the JavaScript "number" type. The behavior of the default option
|
||||
// JS_NORMAL is implementation dependent.
|
||||
//
|
||||
// This option is an enum to permit additional types to be added, e.g.
|
||||
// goog.math.Integer.
|
||||
optional JSType jstype = 6 [default = JS_NORMAL];
|
||||
enum JSType {
|
||||
// Use the default type.
|
||||
JS_NORMAL = 0;
|
||||
|
||||
// Use JavaScript strings.
|
||||
JS_STRING = 1;
|
||||
|
||||
// Use JavaScript numbers.
|
||||
JS_NUMBER = 2;
|
||||
}
|
||||
|
||||
// Should this field be parsed lazily? Lazy applies only to message-type
|
||||
// fields. It means that when the outer message is initially parsed, the
|
||||
// inner message's contents will not be parsed but instead stored in encoded
|
||||
// form. The inner message will actually be parsed when it is first accessed.
|
||||
//
|
||||
// This is only a hint. Implementations are free to choose whether to use
|
||||
// eager or lazy parsing regardless of the value of this option. However,
|
||||
// setting this option true suggests that the protocol author believes that
|
||||
// using lazy parsing on this field is worth the additional bookkeeping
|
||||
// overhead typically needed to implement it.
|
||||
//
|
||||
// This option does not affect the public interface of any generated code;
|
||||
// all method signatures remain the same. Furthermore, thread-safety of the
|
||||
// interface is not affected by this option; const methods remain safe to
|
||||
// call from multiple threads concurrently, while non-const methods continue
|
||||
// to require exclusive access.
|
||||
//
|
||||
//
|
||||
// Note that implementations may choose not to check required fields within
|
||||
// a lazy sub-message. That is, calling IsInitialized() on the outer message
|
||||
// may return true even if the inner message has missing required fields.
|
||||
// This is necessary because otherwise the inner message would have to be
|
||||
// parsed in order to perform the check, defeating the purpose of lazy
|
||||
// parsing. An implementation which chooses not to check required fields
|
||||
// must be consistent about it. That is, for any particular sub-message, the
|
||||
// implementation must either *always* check its required fields, or *never*
|
||||
// check its required fields, regardless of whether or not the message has
|
||||
// been parsed.
|
||||
optional bool lazy = 5 [default=false];
|
||||
|
||||
// Is this field deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for accessors, or it will be completely ignored; in the very least, this
|
||||
// is a formalization for deprecating fields.
|
||||
optional bool deprecated = 3 [default=false];
|
||||
|
||||
// For Google-internal migration only. Do not use.
|
||||
optional bool weak = 10 [default=false];
|
||||
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
|
||||
reserved 4; // removed jtype
|
||||
}
|
||||
|
||||
message OneofOptions {
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message EnumOptions {
|
||||
|
||||
// Set this option to true to allow mapping different tag names to the same
|
||||
// value.
|
||||
optional bool allow_alias = 2;
|
||||
|
||||
// Is this enum deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for the enum, or it will be completely ignored; in the very least, this
|
||||
// is a formalization for deprecating enums.
|
||||
optional bool deprecated = 3 [default=false];
|
||||
|
||||
reserved 5; // javanano_as_lite
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message EnumValueOptions {
|
||||
// Is this enum value deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for the enum value, or it will be completely ignored; in the very least,
|
||||
// this is a formalization for deprecating enum values.
|
||||
optional bool deprecated = 1 [default=false];
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message ServiceOptions {
|
||||
|
||||
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
|
||||
// framework. We apologize for hoarding these numbers to ourselves, but
|
||||
// we were already using them long before we decided to release Protocol
|
||||
// Buffers.
|
||||
|
||||
// Is this service deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for the service, or it will be completely ignored; in the very least,
|
||||
// this is a formalization for deprecating services.
|
||||
optional bool deprecated = 33 [default=false];
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message MethodOptions {
|
||||
|
||||
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
|
||||
// framework. We apologize for hoarding these numbers to ourselves, but
|
||||
// we were already using them long before we decided to release Protocol
|
||||
// Buffers.
|
||||
|
||||
// Is this method deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for the method, or it will be completely ignored; in the very least,
|
||||
// this is a formalization for deprecating methods.
|
||||
optional bool deprecated = 33 [default=false];
|
||||
|
||||
// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
|
||||
// or neither? HTTP based RPC implementation may choose GET verb for safe
|
||||
// methods, and PUT verb for idempotent methods instead of the default POST.
|
||||
enum IdempotencyLevel {
|
||||
IDEMPOTENCY_UNKNOWN = 0;
|
||||
NO_SIDE_EFFECTS = 1; // implies idempotent
|
||||
IDEMPOTENT = 2; // idempotent, but may have side effects
|
||||
}
|
||||
optional IdempotencyLevel idempotency_level =
|
||||
34 [default=IDEMPOTENCY_UNKNOWN];
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
|
||||
// A message representing a option the parser does not recognize. This only
|
||||
// appears in options protos created by the compiler::Parser class.
|
||||
// DescriptorPool resolves these when building Descriptor objects. Therefore,
|
||||
// options protos in descriptor objects (e.g. returned by Descriptor::options(),
|
||||
// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
|
||||
// in them.
|
||||
message UninterpretedOption {
|
||||
// The name of the uninterpreted option. Each string represents a segment in
|
||||
// a dot-separated name. is_extension is true iff a segment represents an
|
||||
// extension (denoted with parentheses in options specs in .proto files).
|
||||
// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
|
||||
// "foo.(bar.baz).qux".
|
||||
message NamePart {
|
||||
required string name_part = 1;
|
||||
required bool is_extension = 2;
|
||||
}
|
||||
repeated NamePart name = 2;
|
||||
|
||||
// The value of the uninterpreted option, in whatever type the tokenizer
|
||||
// identified it as during parsing. Exactly one of these should be set.
|
||||
optional string identifier_value = 3;
|
||||
optional uint64 positive_int_value = 4;
|
||||
optional int64 negative_int_value = 5;
|
||||
optional double double_value = 6;
|
||||
optional bytes string_value = 7;
|
||||
optional string aggregate_value = 8;
|
||||
}
|
||||
|
||||
// ===================================================================
|
||||
// Optional source code info
|
||||
|
||||
// Encapsulates information about the original source file from which a
|
||||
// FileDescriptorProto was generated.
|
||||
message SourceCodeInfo {
|
||||
// A Location identifies a piece of source code in a .proto file which
|
||||
// corresponds to a particular definition. This information is intended
|
||||
// to be useful to IDEs, code indexers, documentation generators, and similar
|
||||
// tools.
|
||||
//
|
||||
// For example, say we have a file like:
|
||||
// message Foo {
|
||||
// optional string foo = 1;
|
||||
// }
|
||||
// Let's look at just the field definition:
|
||||
// optional string foo = 1;
|
||||
// ^ ^^ ^^ ^ ^^^
|
||||
// a bc de f ghi
|
||||
// We have the following locations:
|
||||
// span path represents
|
||||
// [a,i) [ 4, 0, 2, 0 ] The whole field definition.
|
||||
// [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
|
||||
// [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
|
||||
// [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
|
||||
// [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
|
||||
//
|
||||
// Notes:
|
||||
// - A location may refer to a repeated field itself (i.e. not to any
|
||||
// particular index within it). This is used whenever a set of elements are
|
||||
// logically enclosed in a single code segment. For example, an entire
|
||||
// extend block (possibly containing multiple extension definitions) will
|
||||
// have an outer location whose path refers to the "extensions" repeated
|
||||
// field without an index.
|
||||
// - Multiple locations may have the same path. This happens when a single
|
||||
// logical declaration is spread out across multiple places. The most
|
||||
// obvious example is the "extend" block again -- there may be multiple
|
||||
// extend blocks in the same scope, each of which will have the same path.
|
||||
// - A location's span is not always a subset of its parent's span. For
|
||||
// example, the "extendee" of an extension declaration appears at the
|
||||
// beginning of the "extend" block and is shared by all extensions within
|
||||
// the block.
|
||||
// - Just because a location's span is a subset of some other location's span
|
||||
// does not mean that it is a descendent. For example, a "group" defines
|
||||
// both a type and a field in a single declaration. Thus, the locations
|
||||
// corresponding to the type and field and their components will overlap.
|
||||
// - Code which tries to interpret locations should probably be designed to
|
||||
// ignore those that it doesn't understand, as more types of locations could
|
||||
// be recorded in the future.
|
||||
repeated Location location = 1;
|
||||
message Location {
|
||||
// Identifies which part of the FileDescriptorProto was defined at this
|
||||
// location.
|
||||
//
|
||||
// Each element is a field number or an index. They form a path from
|
||||
// the root FileDescriptorProto to the place where the definition. For
|
||||
// example, this path:
|
||||
// [ 4, 3, 2, 7, 1 ]
|
||||
// refers to:
|
||||
// file.message_type(3) // 4, 3
|
||||
// .field(7) // 2, 7
|
||||
// .name() // 1
|
||||
// This is because FileDescriptorProto.message_type has field number 4:
|
||||
// repeated DescriptorProto message_type = 4;
|
||||
// and DescriptorProto.field has field number 2:
|
||||
// repeated FieldDescriptorProto field = 2;
|
||||
// and FieldDescriptorProto.name has field number 1:
|
||||
// optional string name = 1;
|
||||
//
|
||||
// Thus, the above path gives the location of a field name. If we removed
|
||||
// the last element:
|
||||
// [ 4, 3, 2, 7 ]
|
||||
// this path refers to the whole field declaration (from the beginning
|
||||
// of the label to the terminating semicolon).
|
||||
repeated int32 path = 1 [packed=true];
|
||||
|
||||
// Always has exactly three or four elements: start line, start column,
|
||||
// end line (optional, otherwise assumed same as start line), end column.
|
||||
// These are packed into a single field for efficiency. Note that line
|
||||
// and column numbers are zero-based -- typically you will want to add
|
||||
// 1 to each before displaying to a user.
|
||||
repeated int32 span = 2 [packed=true];
|
||||
|
||||
// If this SourceCodeInfo represents a complete declaration, these are any
|
||||
// comments appearing before and after the declaration which appear to be
|
||||
// attached to the declaration.
|
||||
//
|
||||
// A series of line comments appearing on consecutive lines, with no other
|
||||
// tokens appearing on those lines, will be treated as a single comment.
|
||||
//
|
||||
// leading_detached_comments will keep paragraphs of comments that appear
|
||||
// before (but not connected to) the current element. Each paragraph,
|
||||
// separated by empty lines, will be one comment element in the repeated
|
||||
// field.
|
||||
//
|
||||
// Only the comment content is provided; comment markers (e.g. //) are
|
||||
// stripped out. For block comments, leading whitespace and an asterisk
|
||||
// will be stripped from the beginning of each line other than the first.
|
||||
// Newlines are included in the output.
|
||||
//
|
||||
// Examples:
|
||||
//
|
||||
// optional int32 foo = 1; // Comment attached to foo.
|
||||
// // Comment attached to bar.
|
||||
// optional int32 bar = 2;
|
||||
//
|
||||
// optional string baz = 3;
|
||||
// // Comment attached to baz.
|
||||
// // Another line attached to baz.
|
||||
//
|
||||
// // Comment attached to qux.
|
||||
// //
|
||||
// // Another line attached to qux.
|
||||
// optional double qux = 4;
|
||||
//
|
||||
// // Detached comment for corge. This is not leading or trailing comments
|
||||
// // to qux or corge because there are blank lines separating it from
|
||||
// // both.
|
||||
//
|
||||
// // Detached comment for corge paragraph 2.
|
||||
//
|
||||
// optional string corge = 5;
|
||||
// /* Block comment attached
|
||||
// * to corge. Leading asterisks
|
||||
// * will be removed. */
|
||||
// /* Block comment attached to
|
||||
// * grault. */
|
||||
// optional int32 grault = 6;
|
||||
//
|
||||
// // ignored detached comments.
|
||||
optional string leading_comments = 3;
|
||||
optional string trailing_comments = 4;
|
||||
repeated string leading_detached_comments = 6;
|
||||
}
|
||||
}
|
||||
|
||||
// Describes the relationship between generated code and its original source
|
||||
// file. A GeneratedCodeInfo message is associated with only one generated
|
||||
// source file, but may contain references to different source .proto files.
|
||||
message GeneratedCodeInfo {
|
||||
// An Annotation connects some span of text in generated code to an element
|
||||
// of its generating .proto file.
|
||||
repeated Annotation annotation = 1;
|
||||
message Annotation {
|
||||
// Identifies the element in the original source .proto file. This field
|
||||
// is formatted the same as SourceCodeInfo.Location.path.
|
||||
repeated int32 path = 1 [packed=true];
|
||||
|
||||
// Identifies the filesystem path to the original source .proto.
|
||||
optional string source_file = 2;
|
||||
|
||||
// Identifies the starting offset in bytes in the generated code
|
||||
// that relates to the identified object.
|
||||
optional int32 begin = 3;
|
||||
|
||||
// Identifies the ending offset in bytes in the generated code that
|
||||
// relates to the identified offset. The end offset should be one past
|
||||
// the last relevant byte (so the length of the text = end - begin).
|
||||
optional int32 end = 4;
|
||||
}
|
||||
}
|
|
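The vendored descriptor.proto above, together with the generated descriptor.pb.go, gives Go programs a typed view of .proto definitions. As a rough, non-authoritative sketch of how these generated types are typically consumed (the set.pb path and the protoc invocation in the comments are illustrative, not taken from this repository), a tool can decode a serialized FileDescriptorSet and walk the files, messages and fields it describes:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// Produced with e.g.: protoc --descriptor_set_out=set.pb --include_source_info foo.proto
	// (file names are illustrative).
	raw, err := ioutil.ReadFile("set.pb")
	if err != nil {
		log.Fatal(err)
	}

	var set descriptor.FileDescriptorSet
	if err := proto.Unmarshal(raw, &set); err != nil {
		log.Fatal(err)
	}

	for _, fd := range set.File {
		fmt.Printf("file %s (package %s, syntax %s)\n", fd.GetName(), fd.GetPackage(), fd.GetSyntax())
		for _, msg := range fd.GetMessageType() {
			for _, f := range msg.GetField() {
				fmt.Printf("  %s.%s = %d (%s)\n", msg.GetName(), f.GetName(), f.GetNumber(), f.GetType())
			}
		}
	}
}
```

This is essentially the walk a protoc plugin performs on the descriptors it receives.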
@@ -14,8 +14,8 @@ import (
|
|||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
// Set at init time by appenginevm_hook.go. If true, we are on App Engine Managed VMs.
|
||||
var appengineVM bool
|
||||
// appengineFlex is set at init time by appengineflex_hook.go. If true, we are on App Engine Flex.
|
||||
var appengineFlex bool
|
||||
|
||||
// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
|
||||
var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
|
||||
|
|
|
@@ -1,15 +1,14 @@
// Copyright 2015 The oauth2 Authors. All rights reserved.
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build appenginevm
// +build appengine appenginevm

package google

import "google.golang.org/appengine"

func init() {
	appengineVM = true
	appengineTokenFunc = appengine.AccessToken
	appengineAppIDFunc = appengine.AppID
}
@@ -0,0 +1,11 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build appenginevm

package google

func init() {
	appengineFlex = true // Flex doesn't support appengine.AccessToken; depend on metadata server.
}
@@ -24,6 +24,12 @@ import (
|
|||
type DefaultCredentials struct {
|
||||
ProjectID string // may be empty
|
||||
TokenSource oauth2.TokenSource
|
||||
|
||||
// JSON contains the raw bytes from a JSON credentials file.
|
||||
// This field may be nil if authentication is provided by the
|
||||
// environment and not with a credentials file, e.g. when code is
|
||||
// running on Google Cloud Platform.
|
||||
JSON []byte
|
||||
}
|
||||
|
||||
// DefaultClient returns an HTTP Client that uses the
|
||||
|
@@ -81,7 +87,7 @@ func FindDefaultCredentials(ctx context.Context, scope ...string) (*DefaultCrede
|
|||
}
|
||||
|
||||
// Third, if we're on Google App Engine use those credentials.
|
||||
if appengineTokenFunc != nil && !appengineVM {
|
||||
if appengineTokenFunc != nil && !appengineFlex {
|
||||
return &DefaultCredentials{
|
||||
ProjectID: appengineAppIDFunc(ctx),
|
||||
TokenSource: AppEngineTokenSource(ctx, scope...),
|
||||
|
@@ -126,5 +132,6 @@ func readCredentialsFile(ctx context.Context, filename string, scopes []string)
|
|||
return &DefaultCredentials{
|
||||
ProjectID: f.ProjectID,
|
||||
TokenSource: ts,
|
||||
JSON: b,
|
||||
}, nil
|
||||
}
|
||||
|
|
|
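The hunks above add the raw JSON key material to DefaultCredentials and make App Engine Flex fall through to the metadata server instead of appengine.AccessToken. A minimal sketch of how a caller of Application Default Credentials sees this, with the storage scope chosen only as an example:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()

	// Application Default Credentials: GOOGLE_APPLICATION_CREDENTIALS, gcloud's
	// well-known file, App Engine, or the GCE metadata server.
	creds, err := google.FindDefaultCredentials(ctx, "https://www.googleapis.com/auth/devstorage.read_write")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("project:", creds.ProjectID)                   // may be empty
	fmt.Println("have JSON key material:", creds.JSON != nil)  // nil when creds come from the environment

	tok, err := creds.TokenSource.Token()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token expires:", tok.Expiry)
}
```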
@@ -22,23 +22,33 @@ func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]by
|
|||
return json.Marshal(schema)
|
||||
}
|
||||
|
||||
mustInclude := make(map[string]struct{})
|
||||
mustInclude := make(map[string]bool)
|
||||
for _, f := range forceSendFields {
|
||||
mustInclude[f] = struct{}{}
|
||||
mustInclude[f] = true
|
||||
}
|
||||
useNull := make(map[string]struct{})
|
||||
for _, f := range nullFields {
|
||||
useNull[f] = struct{}{}
|
||||
useNull := make(map[string]bool)
|
||||
useNullMaps := make(map[string]map[string]bool)
|
||||
for _, nf := range nullFields {
|
||||
parts := strings.SplitN(nf, ".", 2)
|
||||
field := parts[0]
|
||||
if len(parts) == 1 {
|
||||
useNull[field] = true
|
||||
} else {
|
||||
if useNullMaps[field] == nil {
|
||||
useNullMaps[field] = map[string]bool{}
|
||||
}
|
||||
useNullMaps[field][parts[1]] = true
|
||||
}
|
||||
}
|
||||
|
||||
dataMap, err := schemaToMap(schema, mustInclude, useNull)
|
||||
dataMap, err := schemaToMap(schema, mustInclude, useNull, useNullMaps)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return json.Marshal(dataMap)
|
||||
}
|
||||
|
||||
func schemaToMap(schema interface{}, mustInclude, useNull map[string]struct{}) (map[string]interface{}, error) {
|
||||
func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNullMaps map[string]map[string]bool) (map[string]interface{}, error) {
|
||||
m := make(map[string]interface{})
|
||||
s := reflect.ValueOf(schema)
|
||||
st := s.Type()
|
||||
|
@@ -59,17 +69,35 @@ func schemaToMap(schema interface{}, mustInclude, useNull map[string]struct{}) (
|
|||
v := s.Field(i)
|
||||
f := st.Field(i)
|
||||
|
||||
if _, ok := useNull[f.Name]; ok {
|
||||
if useNull[f.Name] {
|
||||
if !isEmptyValue(v) {
|
||||
return nil, fmt.Errorf("field %q in NullFields has non-empty value", f.Name)
|
||||
}
|
||||
m[tag.apiName] = nil
|
||||
continue
|
||||
}
|
||||
|
||||
if !includeField(v, f, mustInclude) {
|
||||
continue
|
||||
}
|
||||
|
||||
// If map fields are explicitly set to null, use a map[string]interface{}.
|
||||
if f.Type.Kind() == reflect.Map && useNullMaps[f.Name] != nil {
|
||||
ms, ok := v.Interface().(map[string]string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]string", f.Name)
|
||||
}
|
||||
mi := map[string]interface{}{}
|
||||
for k, v := range ms {
|
||||
mi[k] = v
|
||||
}
|
||||
for k := range useNullMaps[f.Name] {
|
||||
mi[k] = nil
|
||||
}
|
||||
m[tag.apiName] = mi
|
||||
continue
|
||||
}
|
||||
|
||||
// nil maps are treated as empty maps.
|
||||
if f.Type.Kind() == reflect.Map && v.IsNil() {
|
||||
m[tag.apiName] = map[string]string{}
|
||||
|
@@ -139,7 +167,7 @@ func parseJSONTag(val string) (jsonTag, error) {
|
|||
}
|
||||
|
||||
// Reports whether the struct field "f" with value "v" should be included in JSON output.
|
||||
func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool {
|
||||
func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]bool) bool {
|
||||
// The regular JSON encoding of a nil pointer is "null", which means "delete this field".
|
||||
// Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set.
|
||||
// However, many fields are not pointers, so there would be no way to delete these fields.
|
||||
|
@@ -156,8 +184,7 @@ func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string
|
|||
return false
|
||||
}
|
||||
|
||||
_, ok := mustInclude[f.Name]
|
||||
return ok || !isEmptyValue(v)
|
||||
return mustInclude[f.Name] || !isEmptyValue(v)
|
||||
}
|
||||
|
||||
// isEmptyValue reports whether v is the empty value for its type. This
|
||||
|
|
|
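The rewrite above swaps the struct{} sets for bool maps and introduces useNullMaps, so a NullFields entry of the form "Field.key" can null out a single key inside a map-valued field. A hedged sketch of the resulting behaviour, using an invented, generated-style Bucket schema (not a real API type) and the vendored import path google.golang.org/api/gensupport:

```go
package main

import (
	"fmt"
	"log"

	"google.golang.org/api/gensupport"
)

// Bucket mimics the shape of a generated API struct; the field names and
// tags are illustrative only.
type Bucket struct {
	Name   string            `json:"name,omitempty"`
	Labels map[string]string `json:"labels,omitempty"`

	// ForceSendFields lists fields to include even when they hold their zero value.
	ForceSendFields []string `json:"-"`
	// NullFields lists fields to send as explicit JSON null; a "Field.key"
	// entry nulls out a single key inside a map-valued field.
	NullFields []string `json:"-"`
}

func main() {
	b := Bucket{
		Labels:          map[string]string{"team": "storage"},
		ForceSendFields: []string{"Name"},       // emit "name":"" even though it is empty
		NullFields:      []string{"Labels.env"}, // emit "env":null inside labels
	}

	data, err := gensupport.MarshalJSON(b, b.ForceSendFields, b.NullFields)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data)) // {"labels":{"env":null,"team":"storage"},"name":""}
}
```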
@@ -174,26 +174,126 @@ func typeHeader(contentType string) textproto.MIMEHeader {
|
|||
// PrepareUpload determines whether the data in the supplied reader should be
|
||||
// uploaded in a single request, or in sequential chunks.
|
||||
// chunkSize is the size of the chunk that media should be split into.
|
||||
// If chunkSize is non-zero and the contents of media do not fit in a single
|
||||
// chunk (or there is an error reading media), then media will be returned as a
|
||||
// MediaBuffer. Otherwise, media will be returned as a Reader.
|
||||
//
|
||||
// If chunkSize is zero, media is returned as the first value, and the other
|
||||
// two return values are nil, true.
|
||||
//
|
||||
// Otherwise, a MediaBuffer is returned, along with a bool indicating whether the
|
||||
// contents of media fit in a single chunk.
|
||||
//
|
||||
// After PrepareUpload has been called, media should no longer be used: the
|
||||
// media content should be accessed via one of the return values.
|
||||
func PrepareUpload(media io.Reader, chunkSize int) (io.Reader, *MediaBuffer) {
|
||||
func PrepareUpload(media io.Reader, chunkSize int) (r io.Reader, mb *MediaBuffer, singleChunk bool) {
|
||||
if chunkSize == 0 { // do not chunk
|
||||
return media, nil
|
||||
return media, nil, true
|
||||
}
|
||||
mb = NewMediaBuffer(media, chunkSize)
|
||||
_, _, _, err := mb.Chunk()
|
||||
// If err is io.EOF, we can upload this in a single request. Otherwise, err is
|
||||
// either nil or a non-EOF error. If it is the latter, then the next call to
|
||||
// mb.Chunk will return the same error. Returning a MediaBuffer ensures that this
|
||||
// error will be handled at some point.
|
||||
return nil, mb, err == io.EOF
|
||||
}
|
||||
|
||||
// MediaInfo holds information for media uploads. It is intended for use by generated
|
||||
// code only.
|
||||
type MediaInfo struct {
|
||||
// At most one of Media and MediaBuffer will be set.
|
||||
media io.Reader
|
||||
buffer *MediaBuffer
|
||||
singleChunk bool
|
||||
mType string
|
||||
size int64 // mediaSize, if known. Used only for calls to progressUpdater_.
|
||||
progressUpdater googleapi.ProgressUpdater
|
||||
}
|
||||
|
||||
// NewInfoFromMedia should be invoked from the Media method of a call. It returns a
|
||||
// MediaInfo populated with chunk size and content type, and a reader or MediaBuffer
|
||||
// if needed.
|
||||
func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo {
|
||||
mi := &MediaInfo{}
|
||||
opts := googleapi.ProcessMediaOptions(options)
|
||||
if !opts.ForceEmptyContentType {
|
||||
r, mi.mType = DetermineContentType(r, opts.ContentType)
|
||||
}
|
||||
mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize)
|
||||
return mi
|
||||
}
|
||||
|
||||
// NewInfoFromResumableMedia should be invoked from the ResumableMedia method of a
|
||||
// call. It returns a MediaInfo using the given reader, size and media type.
|
||||
func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo {
|
||||
rdr := ReaderAtToReader(r, size)
|
||||
rdr, mType := DetermineContentType(rdr, mediaType)
|
||||
return &MediaInfo{
|
||||
size: size,
|
||||
mType: mType,
|
||||
buffer: NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize),
|
||||
media: nil,
|
||||
singleChunk: false,
|
||||
}
|
||||
}
|
||||
|
||||
func (mi *MediaInfo) SetProgressUpdater(pu googleapi.ProgressUpdater) {
|
||||
if mi != nil {
|
||||
mi.progressUpdater = pu
|
||||
}
|
||||
}
|
||||
|
||||
// UploadType determines the type of upload: a single request, or a resumable
|
||||
// series of requests.
|
||||
func (mi *MediaInfo) UploadType() string {
|
||||
if mi.singleChunk {
|
||||
return "multipart"
|
||||
}
|
||||
return "resumable"
|
||||
}
|
||||
|
||||
// UploadRequest sets up an HTTP request for media upload. It adds headers
|
||||
// as necessary, and returns a replacement for the body.
|
||||
func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, cleanup func()) {
|
||||
cleanup = func() {}
|
||||
if mi == nil {
|
||||
return body, cleanup
|
||||
}
|
||||
var media io.Reader
|
||||
if mi.media != nil {
|
||||
// This only happens when the caller has turned off chunking. In that
|
||||
// case, we write all of media in a single non-retryable request.
|
||||
media = mi.media
|
||||
} else if mi.singleChunk {
|
||||
// The data fits in a single chunk, which has now been read into the MediaBuffer.
|
||||
// We obtain that chunk so we can write it in a single request. The request can
|
||||
// be retried because the data is stored in the MediaBuffer.
|
||||
media, _, _, _ = mi.buffer.Chunk()
|
||||
}
|
||||
if media != nil {
|
||||
combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType)
|
||||
cleanup = func() { combined.Close() }
|
||||
reqHeaders.Set("Content-Type", ctype)
|
||||
body = combined
|
||||
}
|
||||
if mi.buffer != nil && mi.mType != "" && !mi.singleChunk {
|
||||
reqHeaders.Set("X-Upload-Content-Type", mi.mType)
|
||||
}
|
||||
return body, cleanup
|
||||
}
|
||||
|
||||
// ResumableUpload returns an appropriately configured ResumableUpload value if the
|
||||
// upload is resumable, or nil otherwise.
|
||||
func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload {
|
||||
if mi == nil || mi.singleChunk {
|
||||
return nil
|
||||
}
|
||||
return &ResumableUpload{
|
||||
URI: locURI,
|
||||
Media: mi.buffer,
|
||||
MediaType: mi.mType,
|
||||
Callback: func(curr int64) {
|
||||
if mi.progressUpdater != nil {
|
||||
mi.progressUpdater(curr, mi.size)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
mb := NewMediaBuffer(media, chunkSize)
|
||||
rdr, _, _, err := mb.Chunk()
|
||||
|
||||
if err == io.EOF { // we can upload this in a single request
|
||||
return rdr, nil
|
||||
}
|
||||
// err might be a non-EOF error. If it is, the next call to mb.Chunk will
|
||||
// return the same error. Returning a MediaBuffer ensures that this error
|
||||
// will be handled at some point.
|
||||
|
||||
return nil, mb
|
||||
}
|
||||
|
|
|
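The reworked PrepareUpload reports explicitly whether the media fits in a single chunk, and MediaInfo carries that decision through UploadType, UploadRequest and ResumableUpload. A small sketch of the new contract, using the import path as vendored here and arbitrary literal data:

```go
package main

import (
	"fmt"
	"strings"

	"google.golang.org/api/gensupport"
)

func main() {
	// chunkSize == 0: no chunking, the original reader is handed back as-is.
	r, mb, single := gensupport.PrepareUpload(strings.NewReader("hello, world"), 0)
	fmt.Println(r != nil, mb == nil, single) // true true true

	// Non-zero chunk size larger than the content: the data is buffered, fits in
	// one chunk, and can therefore be sent in a single retryable request.
	r, mb, single = gensupport.PrepareUpload(strings.NewReader("hello, world"), 1024)
	fmt.Println(r == nil, mb != nil, single) // true true true
}
```

In the single-chunk case the data lives in the MediaBuffer, which is what makes the one-shot "multipart" upload retryable.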
@@ -5,6 +5,7 @@
|
|||
package gensupport
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
@@ -32,6 +33,11 @@ func RegisterHook(h Hook) {
|
|||
// If ctx is non-nil, it calls all hooks, then sends the request with
|
||||
// ctxhttp.Do, then calls any functions returned by the hooks in reverse order.
|
||||
func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
|
||||
// Disallow Accept-Encoding because it interferes with the automatic gzip handling
|
||||
// done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219.
|
||||
if _, ok := req.Header["Accept-Encoding"]; ok {
|
||||
return nil, errors.New("google api: custom Accept-Encoding headers not allowed")
|
||||
}
|
||||
if ctx == nil {
|
||||
return client.Do(req)
|
||||
}
|
||||
|
|
|
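The new check in SendRequest rejects any caller-supplied Accept-Encoding header up front, since a custom value would defeat the transparent gzip handling done by net/http's default transport. A hedged sketch of what a caller now sees (the URL is a placeholder; the request is rejected before it reaches the network):

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"golang.org/x/net/context"
	"google.golang.org/api/gensupport"
)

func main() {
	req, err := http.NewRequest("GET", "https://www.googleapis.com/storage/v1/b/my-bucket", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Accept-Encoding", "gzip")

	// The header check runs before anything is sent, so the call fails fast.
	_, err = gensupport.SendRequest(context.Background(), http.DefaultClient, req)
	fmt.Println(err) // google api: custom Accept-Encoding headers not allowed
}
```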
@@ -0,0 +1,38 @@
// Copyright 2012 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package transport contains HTTP transports used to make
// authenticated API requests.
package transport

import (
	"errors"
	"net/http"
)

// APIKey is an HTTP Transport which wraps an underlying transport and
// appends an API Key "key" parameter to the URL of outgoing requests.
type APIKey struct {
	// Key is the API Key to set on requests.
	Key string

	// Transport is the underlying HTTP transport.
	// If nil, http.DefaultTransport is used.
	Transport http.RoundTripper
}

func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) {
	rt := t.Transport
	if rt == nil {
		rt = http.DefaultTransport
		if rt == nil {
			return nil, errors.New("googleapi/transport: no Transport specified or available")
		}
	}
	newReq := *req
	args := newReq.URL.Query()
	args.Set("key", t.Key)
	newReq.URL.RawQuery = args.Encode()
	return rt.RoundTrip(&newReq)
}
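For completeness, this is roughly how the new googleapi/transport.APIKey round-tripper is meant to be wired into an http.Client; the key value and bucket URL are placeholders:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"

	"google.golang.org/api/googleapi/transport"
)

func main() {
	// API-key auth is enough for public, read-only endpoints.
	client := &http.Client{
		Transport: &transport.APIKey{Key: "YOUR_API_KEY"},
	}

	resp, err := client.Get("https://www.googleapis.com/storage/v1/b/some-public-bucket")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(body), "bytes")
}
```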
@@ -0,0 +1,104 @@
|
|||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
)
|
||||
|
||||
// Creds returns credential information obtained from DialSettings, or if none, then
|
||||
// it returns default credential information.
|
||||
func Creds(ctx context.Context, ds *DialSettings) (*google.DefaultCredentials, error) {
|
||||
if ds.CredentialsFile != "" {
|
||||
return credFileTokenSource(ctx, ds.CredentialsFile, ds.Scopes...)
|
||||
}
|
||||
if ds.TokenSource != nil {
|
||||
return &google.DefaultCredentials{TokenSource: ds.TokenSource}, nil
|
||||
}
|
||||
return google.FindDefaultCredentials(ctx, ds.Scopes...)
|
||||
}
|
||||
|
||||
// credFileTokenSource reads a refresh token file or a service account and returns
|
||||
// a TokenSource constructed from the config.
|
||||
func credFileTokenSource(ctx context.Context, filename string, scope ...string) (*google.DefaultCredentials, error) {
|
||||
data, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot read credentials file: %v", err)
|
||||
}
|
||||
// See if it is a refresh token credentials file first.
|
||||
ts, ok, err := refreshTokenTokenSource(ctx, data, scope...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ok {
|
||||
return &google.DefaultCredentials{
|
||||
TokenSource: ts,
|
||||
JSON: data,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// If not, it should be a service account.
|
||||
cfg, err := google.JWTConfigFromJSON(data, scope...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("google.JWTConfigFromJSON: %v", err)
|
||||
}
|
||||
// jwt.Config does not expose the project ID, so re-unmarshal to get it.
|
||||
var pid struct {
|
||||
ProjectID string `json:"project_id"`
|
||||
}
|
||||
if err := json.Unmarshal(data, &pid); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &google.DefaultCredentials{
|
||||
ProjectID: pid.ProjectID,
|
||||
TokenSource: cfg.TokenSource(ctx),
|
||||
JSON: data,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func refreshTokenTokenSource(ctx context.Context, data []byte, scope ...string) (oauth2.TokenSource, bool, error) {
|
||||
var c cred
|
||||
if err := json.Unmarshal(data, &c); err != nil {
|
||||
return nil, false, fmt.Errorf("cannot unmarshal credentials file: %v", err)
|
||||
}
|
||||
if c.ClientID == "" || c.ClientSecret == "" || c.RefreshToken == "" || c.Type != "authorized_user" {
|
||||
return nil, false, nil
|
||||
}
|
||||
cfg := &oauth2.Config{
|
||||
ClientID: c.ClientID,
|
||||
ClientSecret: c.ClientSecret,
|
||||
Endpoint: google.Endpoint,
|
||||
RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
|
||||
Scopes: scope,
|
||||
}
|
||||
return cfg.TokenSource(ctx, &oauth2.Token{
|
||||
RefreshToken: c.RefreshToken,
|
||||
Expiry: time.Now(),
|
||||
}), true, nil
|
||||
}
|
||||
|
||||
type cred struct {
|
||||
ClientID string `json:"client_id"`
|
||||
ClientSecret string `json:"client_secret"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
Type string `json:"type"`
|
||||
}
|
|
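Creds and credFileTokenSource are the machinery behind the credentials handling the new gcs backend relies on: a credentials file is first tried as an "authorized_user" refresh-token file and otherwise parsed as a service account key. A hedged sketch of the public path into this code, assuming the vendored option package exposes WithCredentialsFile (the setter for DialSettings.CredentialsFile); the file path and bucket name are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// WithCredentialsFile routes through internal.Creds above; without it,
	// Application Default Credentials are used instead.
	client, err := storage.NewClient(ctx, option.WithCredentialsFile("/path/to/account.json"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	fmt.Println(client.Bucket("my-terraform-state") != nil)
}
```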
@@ -0,0 +1,59 @@
|
|||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"google.golang.org/grpc/naming"
|
||||
)
|
||||
|
||||
// PoolResolver provides a fixed list of addresses to load balance between
|
||||
// and does not provide further updates.
|
||||
type PoolResolver struct {
|
||||
poolSize int
|
||||
dialOpt *DialSettings
|
||||
ch chan []*naming.Update
|
||||
}
|
||||
|
||||
// NewPoolResolver returns a PoolResolver
|
||||
// This is an EXPERIMENTAL API and may be changed or removed in the future.
|
||||
func NewPoolResolver(size int, o *DialSettings) *PoolResolver {
|
||||
return &PoolResolver{poolSize: size, dialOpt: o}
|
||||
}
|
||||
|
||||
// Resolve returns a Watcher for the endpoint defined by the DialSettings
|
||||
// provided to NewPoolResolver.
|
||||
func (r *PoolResolver) Resolve(target string) (naming.Watcher, error) {
|
||||
if r.dialOpt.Endpoint == "" {
|
||||
return nil, errors.New("No endpoint configured")
|
||||
}
|
||||
addrs := make([]*naming.Update, 0, r.poolSize)
|
||||
for i := 0; i < r.poolSize; i++ {
|
||||
addrs = append(addrs, &naming.Update{Op: naming.Add, Addr: r.dialOpt.Endpoint, Metadata: i})
|
||||
}
|
||||
r.ch = make(chan []*naming.Update, 1)
|
||||
r.ch <- addrs
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// Next returns a static list of updates on the first call,
|
||||
// and blocks indefinitely until Close is called on subsequent calls.
|
||||
func (r *PoolResolver) Next() ([]*naming.Update, error) {
|
||||
return <-r.ch, nil
|
||||
}
|
||||
|
||||
func (r *PoolResolver) Close() {
|
||||
close(r.ch)
|
||||
}
|
|
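A short sketch, not part of the change, illustrating the PoolResolver contract described above; the pool size and endpoint are placeholder values.

// Hypothetical helper (not part of this change) exercising the resolver:
// Resolve returns the PoolResolver itself as a naming.Watcher, the first Next
// yields poolSize Add updates for the same endpoint, and later Next calls
// block until Close.
func examplePoolResolver() ([]*naming.Update, error) {
	r := NewPoolResolver(3, &DialSettings{Endpoint: "example.googleapis.com:443"}) // placeholder endpoint
	w, err := r.Resolve("") // the target argument is unused; the endpoint comes from DialSettings
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return w.Next() // three naming.Add updates for the same address, Metadata 0..2
}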
@@ -0,0 +1,12 @@
{
  "type": "service_account",
  "project_id": "project_id",
  "private_key_id": "private_key_id",
  "private_key": "private_key",
  "client_email": "xyz@developer.gserviceaccount.com",
  "client_id": "123",
  "auth_uri": "https://accounts.google.com/o/oauth2/auth",
  "token_uri": "https://accounts.google.com/o/oauth2/token",
  "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
  "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/xyz%40developer.gserviceaccount.com"
}
@@ -0,0 +1,37 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package internal supports the options and transport packages.
package internal

import (
	"net/http"

	"golang.org/x/oauth2"
	"google.golang.org/grpc"
)

// DialSettings holds information needed to establish a connection with a
// Google API service.
type DialSettings struct {
	Endpoint        string
	Scopes          []string
	TokenSource     oauth2.TokenSource
	CredentialsFile string // if set, TokenSource is ignored.
	UserAgent       string
	APIKey          string
	HTTPClient      *http.Client
	GRPCDialOpts    []grpc.DialOption
	GRPCConn        *grpc.ClientConn
}
@@ -0,0 +1,231 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package iterator provides support for standard Google API iterators.
// See https://github.com/GoogleCloudPlatform/gcloud-golang/wiki/Iterator-Guidelines.
package iterator

import (
	"errors"
	"fmt"
	"reflect"
)

// Done is returned by an iterator's Next method when the iteration is
// complete; when there are no more items to return.
var Done = errors.New("no more items in iterator")

// We don't support mixed calls to Next and NextPage because they play
// with the paging state in incompatible ways.
var errMixed = errors.New("iterator: Next and NextPage called on same iterator")

// PageInfo contains information about an iterator's paging state.
type PageInfo struct {
	// Token is the token used to retrieve the next page of items from the
	// API. You may set Token immediately after creating an iterator to
	// begin iteration at a particular point. If Token is the empty string,
	// the iterator will begin with the first eligible item.
	//
	// The result of setting Token after the first call to Next is undefined.
	//
	// After the underlying API method is called to retrieve a page of items,
	// Token is set to the next-page token in the response.
	Token string

	// MaxSize is the maximum number of items returned by a call to the API.
	// Set MaxSize as a hint to optimize the buffering behavior of the iterator.
	// If zero, the page size is determined by the underlying service.
	//
	// Use Pager to retrieve a page of a specific, exact size.
	MaxSize int

	// The error state of the iterator. Manipulated by PageInfo.next and Pager.
	// This is a latch: it starts as nil, and once set should never change.
	err error

	// If true, no more calls to fetch should be made. Set to true when fetch
	// returns an empty page token. The iterator is Done when this is true AND
	// the buffer is empty.
	atEnd bool

	// Function that fetches a page from the underlying service. It should pass
	// the pageSize and pageToken arguments to the service, fill the buffer
	// with the results from the call, and return the next-page token returned
	// by the service. The function must not remove any existing items from the
	// buffer. If the underlying RPC takes an int32 page size, pageSize should
	// be silently truncated.
	fetch func(pageSize int, pageToken string) (nextPageToken string, err error)

	// Function that returns the number of currently buffered items.
	bufLen func() int

	// Function that returns the buffer, after setting the buffer variable to nil.
	takeBuf func() interface{}

	// Set to true on first call to PageInfo.next or Pager.NextPage. Used to check
	// for calls to both Next and NextPage with the same iterator.
	nextCalled, nextPageCalled bool
}

// NewPageInfo exposes internals for iterator implementations.
// It is not a stable interface.
var NewPageInfo = newPageInfo

// If an iterator can support paging, its iterator-creating method should call
// this (via the NewPageInfo variable above).
//
// The fetch, bufLen and takeBuf arguments provide access to the
// iterator's internal slice of buffered items. They behave as described in
// PageInfo, above.
//
// The return value is the PageInfo.next method bound to the returned PageInfo value.
// (Returning it avoids exporting PageInfo.next.)
func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (*PageInfo, func() error) {
	pi := &PageInfo{
		fetch:   fetch,
		bufLen:  bufLen,
		takeBuf: takeBuf,
	}
	return pi, pi.next
}

// Remaining returns the number of items available before the iterator makes another API call.
func (pi *PageInfo) Remaining() int { return pi.bufLen() }

// next provides support for an iterator's Next function. An iterator's Next
// should return the error returned by next if non-nil; else it can assume
// there is at least one item in its buffer, and it should return that item and
// remove it from the buffer.
func (pi *PageInfo) next() error {
	pi.nextCalled = true
	if pi.err != nil { // Once we get an error, always return it.
		// TODO(jba): fix so users can retry on transient errors? Probably not worth it.
		return pi.err
	}
	if pi.nextPageCalled {
		pi.err = errMixed
		return pi.err
	}
	// Loop until we get some items or reach the end.
	for pi.bufLen() == 0 && !pi.atEnd {
		if err := pi.fill(pi.MaxSize); err != nil {
			pi.err = err
			return pi.err
		}
		if pi.Token == "" {
			pi.atEnd = true
		}
	}
	// Either the buffer is non-empty or pi.atEnd is true (or both).
	if pi.bufLen() == 0 {
		// The buffer is empty and pi.atEnd is true, i.e. the service has no
		// more items.
		pi.err = Done
	}
	return pi.err
}

// Call the service to fill the buffer, using size and pi.Token. Set pi.Token to the
// next-page token returned by the call.
// If fill returns a non-nil error, the buffer will be empty.
func (pi *PageInfo) fill(size int) error {
	tok, err := pi.fetch(size, pi.Token)
	if err != nil {
		pi.takeBuf() // clear the buffer
		return err
	}
	pi.Token = tok
	return nil
}

// Pageable is implemented by iterators that support paging.
type Pageable interface {
	// PageInfo returns paging information associated with the iterator.
	PageInfo() *PageInfo
}

// Pager supports retrieving iterator items a page at a time.
type Pager struct {
	pageInfo *PageInfo
	pageSize int
}

// NewPager returns a pager that uses iter. Calls to its NextPage method will
// obtain exactly pageSize items, unless fewer remain. The pageToken argument
// indicates where to start the iteration. Pass the empty string to start at
// the beginning, or pass a token retrieved from a call to Pager.NextPage.
//
// If you use an iterator with a Pager, you must not call Next on the iterator.
func NewPager(iter Pageable, pageSize int, pageToken string) *Pager {
	p := &Pager{
		pageInfo: iter.PageInfo(),
		pageSize: pageSize,
	}
	p.pageInfo.Token = pageToken
	if pageSize <= 0 {
		p.pageInfo.err = errors.New("iterator: page size must be positive")
	}
	return p
}

// NextPage retrieves a sequence of items from the iterator and appends them
// to slicep, which must be a pointer to a slice of the iterator's item type.
// Exactly p.pageSize items will be appended, unless fewer remain.
//
// The first return value is the page token to use for the next page of items.
// If empty, there are no more pages. Aside from checking for the end of the
// iteration, the returned page token is only needed if the iteration is to be
// resumed at a later time, in another context (possibly another process).
//
// The second return value is non-nil if an error occurred. It will never be
// the special iterator sentinel value Done. To recognize the end of the
// iteration, compare nextPageToken to the empty string.
//
// It is possible for NextPage to return a single zero-length page along with
// an empty page token when there are no more items in the iteration.
func (p *Pager) NextPage(slicep interface{}) (nextPageToken string, err error) {
	p.pageInfo.nextPageCalled = true
	if p.pageInfo.err != nil {
		return "", p.pageInfo.err
	}
	if p.pageInfo.nextCalled {
		p.pageInfo.err = errMixed
		return "", p.pageInfo.err
	}
	if p.pageInfo.bufLen() > 0 {
		return "", errors.New("must call NextPage with an empty buffer")
	}
	// The buffer must be empty here, so takeBuf is a no-op. We call it just to get
	// the buffer's type.
	wantSliceType := reflect.PtrTo(reflect.ValueOf(p.pageInfo.takeBuf()).Type())
	if slicep == nil {
		return "", errors.New("nil passed to Pager.NextPage")
	}
	vslicep := reflect.ValueOf(slicep)
	if vslicep.Type() != wantSliceType {
		return "", fmt.Errorf("slicep should be of type %s, got %T", wantSliceType, slicep)
	}
	for p.pageInfo.bufLen() < p.pageSize {
		if err := p.pageInfo.fill(p.pageSize - p.pageInfo.bufLen()); err != nil {
			p.pageInfo.err = err
			return "", p.pageInfo.err
		}
		if p.pageInfo.Token == "" {
			break
		}
	}
	e := vslicep.Elem()
	e.Set(reflect.AppendSlice(e, reflect.ValueOf(p.pageInfo.takeBuf())))
	return p.pageInfo.Token, nil
}
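A hedged sketch of how a client library might wire its buffer into NewPageInfo; stringIterator, newStringIterator and listPage are hypothetical names, not part of this change, and package qualifiers are omitted as if the code lived alongside this package.

// Hypothetical iterator over string items (not part of this change), showing
// how a client wires its buffer into NewPageInfo. listPage stands in for an
// RPC that returns one page of results plus a next-page token.
type stringIterator struct {
	pageInfo *PageInfo
	nextFunc func() error
	items    []string
}

func newStringIterator(listPage func(pageSize int, pageToken string) ([]string, string, error)) *stringIterator {
	it := &stringIterator{}
	it.pageInfo, it.nextFunc = NewPageInfo(
		func(pageSize int, pageToken string) (string, error) {
			items, nextToken, err := listPage(pageSize, pageToken)
			if err != nil {
				return "", err
			}
			it.items = append(it.items, items...) // fill the buffer; never remove items
			return nextToken, nil
		},
		func() int { return len(it.items) },
		func() interface{} { b := it.items; it.items = nil; return b },
	)
	return it
}

// PageInfo makes the iterator usable with Pager.
func (it *stringIterator) PageInfo() *PageInfo { return it.pageInfo }

// Next returns Done once the buffer is empty and no pages remain.
func (it *stringIterator) Next() (string, error) {
	if err := it.nextFunc(); err != nil {
		return "", err
	}
	item := it.items[0]
	it.items = it.items[1:]
	return item, nil
}

Under those assumptions, a caller could either loop over it.Next until Done, or page through fixed-size batches with NewPager(it, 50, "").NextPage(&got) where got is a []string.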
@@ -0,0 +1,162 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package option contains options for Google API clients.
package option

import (
	"net/http"

	"golang.org/x/oauth2"
	"google.golang.org/api/internal"
	"google.golang.org/grpc"
)

// A ClientOption is an option for a Google API client.
type ClientOption interface {
	Apply(*internal.DialSettings)
}

// WithTokenSource returns a ClientOption that specifies an OAuth2 token
// source to be used as the basis for authentication.
func WithTokenSource(s oauth2.TokenSource) ClientOption {
	return withTokenSource{s}
}

type withTokenSource struct{ ts oauth2.TokenSource }

func (w withTokenSource) Apply(o *internal.DialSettings) {
	o.TokenSource = w.ts
}

type withCredFile string

func (w withCredFile) Apply(o *internal.DialSettings) {
	o.CredentialsFile = string(w)
}

// WithCredentialsFile returns a ClientOption that authenticates
// API calls with the given service account or refresh token JSON
// credentials file.
func WithCredentialsFile(filename string) ClientOption {
	return withCredFile(filename)
}

// WithServiceAccountFile returns a ClientOption that uses a Google service
// account credentials file to authenticate.
//
// Deprecated: Use WithCredentialsFile instead.
func WithServiceAccountFile(filename string) ClientOption {
	return WithCredentialsFile(filename)
}

// WithEndpoint returns a ClientOption that overrides the default endpoint
// to be used for a service.
func WithEndpoint(url string) ClientOption {
	return withEndpoint(url)
}

type withEndpoint string

func (w withEndpoint) Apply(o *internal.DialSettings) {
	o.Endpoint = string(w)
}

// WithScopes returns a ClientOption that overrides the default OAuth2 scopes
// to be used for a service.
func WithScopes(scope ...string) ClientOption {
	return withScopes(scope)
}

type withScopes []string

func (w withScopes) Apply(o *internal.DialSettings) {
	s := make([]string, len(w))
	copy(s, w)
	o.Scopes = s
}

// WithUserAgent returns a ClientOption that sets the User-Agent.
func WithUserAgent(ua string) ClientOption {
	return withUA(ua)
}

type withUA string

func (w withUA) Apply(o *internal.DialSettings) { o.UserAgent = string(w) }

// WithHTTPClient returns a ClientOption that specifies the HTTP client to use
// as the basis of communications. This option may only be used with services
// that support HTTP as their communication transport. When used, the
// WithHTTPClient option takes precedence over all other supplied options.
func WithHTTPClient(client *http.Client) ClientOption {
	return withHTTPClient{client}
}

type withHTTPClient struct{ client *http.Client }

func (w withHTTPClient) Apply(o *internal.DialSettings) {
	o.HTTPClient = w.client
}

// WithGRPCConn returns a ClientOption that specifies the gRPC client
// connection to use as the basis of communications. This option may only be
// used with services that support gRPC as their communication transport. When
// used, the WithGRPCConn option takes precedence over all other supplied
// options.
func WithGRPCConn(conn *grpc.ClientConn) ClientOption {
	return withGRPCConn{conn}
}

type withGRPCConn struct{ conn *grpc.ClientConn }

func (w withGRPCConn) Apply(o *internal.DialSettings) {
	o.GRPCConn = w.conn
}

// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption
// to an underlying gRPC dial. It does not work with WithGRPCConn.
func WithGRPCDialOption(opt grpc.DialOption) ClientOption {
	return withGRPCDialOption{opt}
}

type withGRPCDialOption struct{ opt grpc.DialOption }

func (w withGRPCDialOption) Apply(o *internal.DialSettings) {
	o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt)
}

// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC
// connections that requests will be balanced between.
// This is an EXPERIMENTAL API and may be changed or removed in the future.
func WithGRPCConnectionPool(size int) ClientOption {
	return withGRPCConnectionPool(size)
}

type withGRPCConnectionPool int

func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) {
	balancer := grpc.RoundRobin(internal.NewPoolResolver(int(w), o))
	o.GRPCDialOpts = append(o.GRPCDialOpts, grpc.WithBalancer(balancer))
}

// WithAPIKey returns a ClientOption that specifies an API key to be used
// as the basis for authentication.
func WithAPIKey(apiKey string) ClientOption {
	return withAPIKey(apiKey)
}

type withAPIKey string

func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) }
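A brief sketch, not part of the change, showing how the options above compose: each ClientOption simply mutates one internal.DialSettings, which is what the transport constructors do with their variadic opts. The endpoint, scope and user agent below are placeholders.

// Hypothetical helper (not part of this change) that applies a list of
// ClientOptions to an empty DialSettings, the same way a transport
// constructor does.
func exampleApplyOptions() internal.DialSettings {
	opts := []ClientOption{
		WithEndpoint("storage.googleapis.com:443"),                            // placeholder endpoint
		WithScopes("https://www.googleapis.com/auth/devstorage.read_write"),   // placeholder scope
		WithUserAgent("my-tool/0.1"),                                          // placeholder user agent
		WithGRPCDialOption(grpc.WithBlock()),                                  // appended to GRPCDialOpts
	}
	var ds internal.DialSettings
	for _, o := range opts {
		o.Apply(&ds)
	}
	return ds
}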
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,107 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package transport/http supports network connections to HTTP servers.
// This package is not intended for use by end developers. Use the
// google.golang.org/api/option package to configure API clients.
package http

import (
	"errors"
	"net/http"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
	"google.golang.org/api/googleapi/transport"
	"google.golang.org/api/internal"
	"google.golang.org/api/option"
)

// NewClient returns an HTTP client for use communicating with a Google cloud
// service, configured with the given ClientOptions. It also returns the endpoint
// for the service as specified in the options.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) {
	var o internal.DialSettings
	for _, opt := range opts {
		opt.Apply(&o)
	}
	if o.GRPCConn != nil {
		return nil, "", errors.New("unsupported gRPC connection specified")
	}
	// TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided?
	if o.HTTPClient != nil {
		return o.HTTPClient, o.Endpoint, nil
	}
	if o.APIKey != "" {
		hc := &http.Client{
			Transport: &transport.APIKey{
				Key: o.APIKey,
				Transport: userAgentTransport{
					base:      baseTransport(ctx),
					userAgent: o.UserAgent,
				},
			},
		}
		return hc, o.Endpoint, nil
	}
	creds, err := internal.Creds(ctx, &o)
	if err != nil {
		return nil, "", err
	}
	hc := &http.Client{
		Transport: &oauth2.Transport{
			Source: creds.TokenSource,
			Base: userAgentTransport{
				base:      baseTransport(ctx),
				userAgent: o.UserAgent,
			},
		},
	}
	return hc, o.Endpoint, nil
}

type userAgentTransport struct {
	userAgent string
	base      http.RoundTripper
}

func (t userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	rt := t.base
	if rt == nil {
		return nil, errors.New("transport: no Transport specified")
	}
	if t.userAgent == "" {
		return rt.RoundTrip(req)
	}
	newReq := *req
	newReq.Header = make(http.Header)
	for k, vv := range req.Header {
		newReq.Header[k] = vv
	}
	// TODO(cbro): append to existing User-Agent header?
	newReq.Header["User-Agent"] = []string{t.userAgent}
	return rt.RoundTrip(&newReq)
}

// Set at init time by dial_appengine.go. If nil, we're not on App Engine.
var appengineUrlfetchHook func(context.Context) http.RoundTripper

// baseTransport returns the base HTTP transport.
// On App Engine, this is urlfetch.Transport, otherwise it's http.DefaultTransport.
func baseTransport(ctx context.Context) http.RoundTripper {
	if appengineUrlfetchHook != nil {
		return appengineUrlfetchHook(ctx)
	}
	return http.DefaultTransport
}
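A hedged usage sketch for NewClient, not part of the change; the scope, user agent and request URL are placeholders. With no WithHTTPClient or API key supplied, the returned client wraps an oauth2.Transport fed by internal.Creds plus the User-Agent round tripper above.

// Hypothetical caller of NewClient (not part of this change).
func exampleNewClient(ctx context.Context) error {
	hc, endpoint, err := NewClient(ctx,
		option.WithScopes("https://www.googleapis.com/auth/devstorage.read_only"), // placeholder scope
		option.WithUserAgent("my-tool/0.1"),                                       // placeholder user agent
	)
	if err != nil {
		return err
	}
	_ = endpoint // empty unless option.WithEndpoint was supplied
	resp, err := hc.Get("https://www.googleapis.com/storage/v1/b?project=my-project") // placeholder request
	if err != nil {
		return err
	}
	return resp.Body.Close()
}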
vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go (generated, vendored, new file, 64 lines)
@@ -0,0 +1,64 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/api/annotations.proto
|
||||
|
||||
/*
|
||||
Package annotations is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
google/api/annotations.proto
|
||||
google/api/http.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Http
|
||||
HttpRule
|
||||
CustomHttpPattern
|
||||
*/
|
||||
package annotations
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
var E_Http = &proto.ExtensionDesc{
|
||||
ExtendedType: (*google_protobuf.MethodOptions)(nil),
|
||||
ExtensionType: (*HttpRule)(nil),
|
||||
Field: 72295728,
|
||||
Name: "google.api.http",
|
||||
Tag: "bytes,72295728,opt,name=http",
|
||||
Filename: "google/api/annotations.proto",
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterExtension(E_Http)
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/api/annotations.proto", fileDescriptor0) }
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 208 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc,
|
||||
0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64,
|
||||
0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79,
|
||||
0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15,
|
||||
0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53,
|
||||
0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51,
|
||||
0x83, 0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a,
|
||||
0x10, 0xd8, 0x10, 0xa7, 0x3c, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, 0x05, 0x4e, 0x02, 0x8e, 0x08,
|
||||
0x67, 0x07, 0x80, 0x4c, 0x0e, 0x60, 0x8c, 0x72, 0x84, 0xca, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5,
|
||||
0xeb, 0xe5, 0x17, 0xa5, 0xeb, 0xa7, 0xa7, 0xe6, 0x81, 0xed, 0xd5, 0x87, 0x48, 0x25, 0x16, 0x64,
|
||||
0x16, 0xa3, 0x7b, 0xda, 0x1a, 0x89, 0xbd, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d,
|
||||
0xac, 0xc9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x29, 0x19, 0x62, 0x28, 0x01, 0x00, 0x00,
|
||||
}
|
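A hedged sketch, not part of the change, of how the E_Http extension registered above could be attached to a MethodOptions message with the proto package's SetExtension and read back with GetExtension; the selector and path are placeholders.

// Hypothetical use of the E_Http extension (not part of this change).
func exampleHTTPExtension() error {
	opts := &google_protobuf.MethodOptions{}
	rule := &HttpRule{
		Selector: "example.Messaging.GetMessage",            // placeholder selector
		Pattern:  &HttpRule_Get{Get: "/v1/messages/{message_id}"}, // placeholder path template
	}
	if err := proto.SetExtension(opts, E_Http, rule); err != nil {
		return err
	}
	got, err := proto.GetExtension(opts, E_Http)
	if err != nil {
		return err
	}
	_ = got.(*HttpRule) // the extension round-trips as an *HttpRule
	return nil
}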
vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go (generated, vendored, new file, 566 lines)
@@ -0,0 +1,566 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/api/http.proto
|
||||
|
||||
package annotations
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// Defines the HTTP configuration for a service. It contains a list of
|
||||
// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
|
||||
// to one or more HTTP REST API methods.
|
||||
type Http struct {
|
||||
// A list of HTTP configuration rules that apply to individual API methods.
|
||||
//
|
||||
// **NOTE:** All service configuration rules follow "last one wins" order.
|
||||
Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Http) Reset() { *m = Http{} }
|
||||
func (m *Http) String() string { return proto.CompactTextString(m) }
|
||||
func (*Http) ProtoMessage() {}
|
||||
func (*Http) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
|
||||
|
||||
func (m *Http) GetRules() []*HttpRule {
|
||||
if m != nil {
|
||||
return m.Rules
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// `HttpRule` defines the mapping of an RPC method to one or more HTTP
|
||||
// REST APIs. The mapping determines what portions of the request
|
||||
// message are populated from the path, query parameters, or body of
|
||||
// the HTTP request. The mapping is typically specified as an
|
||||
// `google.api.http` annotation, see "google/api/annotations.proto"
|
||||
// for details.
|
||||
//
|
||||
// The mapping consists of a field specifying the path template and
|
||||
// method kind. The path template can refer to fields in the request
|
||||
// message, as in the example below which describes a REST GET
|
||||
// operation on a resource collection of messages:
|
||||
//
|
||||
//
|
||||
// service Messaging {
|
||||
// rpc GetMessage(GetMessageRequest) returns (Message) {
|
||||
// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}";
|
||||
// }
|
||||
// }
|
||||
// message GetMessageRequest {
|
||||
// message SubMessage {
|
||||
// string subfield = 1;
|
||||
// }
|
||||
// string message_id = 1; // mapped to the URL
|
||||
// SubMessage sub = 2; // `sub.subfield` is url-mapped
|
||||
// }
|
||||
// message Message {
|
||||
// string text = 1; // content of the resource
|
||||
// }
|
||||
//
|
||||
// The same http annotation can alternatively be expressed inside the
|
||||
// `GRPC API Configuration` YAML file.
|
||||
//
|
||||
// http:
|
||||
// rules:
|
||||
// - selector: <proto_package_name>.Messaging.GetMessage
|
||||
// get: /v1/messages/{message_id}/{sub.subfield}
|
||||
//
|
||||
// This definition enables an automatic, bidirectional mapping of HTTP
|
||||
// JSON to RPC. Example:
|
||||
//
|
||||
// HTTP | RPC
|
||||
// -----|-----
|
||||
// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))`
|
||||
//
|
||||
// In general, not only fields but also field paths can be referenced
|
||||
// from a path pattern. Fields mapped to the path pattern cannot be
|
||||
// repeated and must have a primitive (non-message) type.
|
||||
//
|
||||
// Any fields in the request message which are not bound by the path
|
||||
// pattern automatically become (optional) HTTP query
|
||||
// parameters. Assume the following definition of the request message:
|
||||
//
|
||||
//
|
||||
// message GetMessageRequest {
|
||||
// message SubMessage {
|
||||
// string subfield = 1;
|
||||
// }
|
||||
// string message_id = 1; // mapped to the URL
|
||||
// int64 revision = 2; // becomes a parameter
|
||||
// SubMessage sub = 3; // `sub.subfield` becomes a parameter
|
||||
// }
|
||||
//
|
||||
//
|
||||
// This enables a HTTP JSON to RPC mapping as below:
|
||||
//
|
||||
// HTTP | RPC
|
||||
// -----|-----
|
||||
// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))`
|
||||
//
|
||||
// Note that fields which are mapped to HTTP parameters must have a
|
||||
// primitive type or a repeated primitive type. Message types are not
|
||||
// allowed. In the case of a repeated type, the parameter can be
|
||||
// repeated in the URL, as in `...?param=A¶m=B`.
|
||||
//
|
||||
// For HTTP method kinds which allow a request body, the `body` field
|
||||
// specifies the mapping. Consider a REST update method on the
|
||||
// message resource collection:
|
||||
//
|
||||
//
|
||||
// service Messaging {
|
||||
// rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
|
||||
// option (google.api.http) = {
|
||||
// put: "/v1/messages/{message_id}"
|
||||
// body: "message"
|
||||
// };
|
||||
// }
|
||||
// }
|
||||
// message UpdateMessageRequest {
|
||||
// string message_id = 1; // mapped to the URL
|
||||
// Message message = 2; // mapped to the body
|
||||
// }
|
||||
//
|
||||
//
|
||||
// The following HTTP JSON to RPC mapping is enabled, where the
|
||||
// representation of the JSON in the request body is determined by
|
||||
// protos JSON encoding:
|
||||
//
|
||||
// HTTP | RPC
|
||||
// -----|-----
|
||||
// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
|
||||
//
|
||||
// The special name `*` can be used in the body mapping to define that
|
||||
// every field not bound by the path template should be mapped to the
|
||||
// request body. This enables the following alternative definition of
|
||||
// the update method:
|
||||
//
|
||||
// service Messaging {
|
||||
// rpc UpdateMessage(Message) returns (Message) {
|
||||
// option (google.api.http) = {
|
||||
// put: "/v1/messages/{message_id}"
|
||||
// body: "*"
|
||||
// };
|
||||
// }
|
||||
// }
|
||||
// message Message {
|
||||
// string message_id = 1;
|
||||
// string text = 2;
|
||||
// }
|
||||
//
|
||||
//
|
||||
// The following HTTP JSON to RPC mapping is enabled:
|
||||
//
|
||||
// HTTP | RPC
|
||||
// -----|-----
|
||||
// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")`
|
||||
//
|
||||
// Note that when using `*` in the body mapping, it is not possible to
|
||||
// have HTTP parameters, as all fields not bound by the path end in
|
||||
// the body. This makes this option more rarely used in practice when
|
||||
// defining REST APIs. The common usage of `*` is in custom methods
|
||||
// which don't use the URL at all for transferring data.
|
||||
//
|
||||
// It is possible to define multiple HTTP methods for one RPC by using
|
||||
// the `additional_bindings` option. Example:
|
||||
//
|
||||
// service Messaging {
|
||||
// rpc GetMessage(GetMessageRequest) returns (Message) {
|
||||
// option (google.api.http) = {
|
||||
// get: "/v1/messages/{message_id}"
|
||||
// additional_bindings {
|
||||
// get: "/v1/users/{user_id}/messages/{message_id}"
|
||||
// }
|
||||
// };
|
||||
// }
|
||||
// }
|
||||
// message GetMessageRequest {
|
||||
// string message_id = 1;
|
||||
// string user_id = 2;
|
||||
// }
|
||||
//
|
||||
//
|
||||
// This enables the following two alternative HTTP JSON to RPC
|
||||
// mappings:
|
||||
//
|
||||
// HTTP | RPC
|
||||
// -----|-----
|
||||
// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
|
||||
// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")`
|
||||
//
|
||||
// # Rules for HTTP mapping
|
||||
//
|
||||
// The rules for mapping HTTP path, query parameters, and body fields
|
||||
// to the request message are as follows:
|
||||
//
|
||||
// 1. The `body` field specifies either `*` or a field path, or is
|
||||
// omitted. If omitted, it assumes there is no HTTP body.
|
||||
// 2. Leaf fields (recursive expansion of nested messages in the
|
||||
// request) can be classified into three types:
|
||||
// (a) Matched in the URL template.
|
||||
// (b) Covered by body (if body is `*`, everything except (a) fields;
|
||||
// else everything under the body field)
|
||||
// (c) All other fields.
|
||||
// 3. URL query parameters found in the HTTP request are mapped to (c) fields.
|
||||
// 4. Any body sent with an HTTP request can contain only (b) fields.
|
||||
//
|
||||
// The syntax of the path template is as follows:
|
||||
//
|
||||
// Template = "/" Segments [ Verb ] ;
|
||||
// Segments = Segment { "/" Segment } ;
|
||||
// Segment = "*" | "**" | LITERAL | Variable ;
|
||||
// Variable = "{" FieldPath [ "=" Segments ] "}" ;
|
||||
// FieldPath = IDENT { "." IDENT } ;
|
||||
// Verb = ":" LITERAL ;
|
||||
//
|
||||
// The syntax `*` matches a single path segment. It follows the semantics of
|
||||
// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
|
||||
// Expansion.
|
||||
//
|
||||
// The syntax `**` matches zero or more path segments. It follows the semantics
|
||||
// of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.3 Reserved
|
||||
// Expansion. NOTE: it must be the last segment in the path except the Verb.
|
||||
//
|
||||
// The syntax `LITERAL` matches literal text in the URL path.
|
||||
//
|
||||
// The syntax `Variable` matches the entire path as specified by its template;
|
||||
// this nested template must not contain further variables. If a variable
|
||||
// matches a single path segment, its template may be omitted, e.g. `{var}`
|
||||
// is equivalent to `{var=*}`.
|
||||
//
|
||||
// NOTE: the field paths in variables and in the `body` must not refer to
|
||||
// repeated fields or map fields.
|
||||
//
|
||||
// Use CustomHttpPattern to specify any HTTP method that is not included in the
|
||||
// `pattern` field, such as HEAD, or "*" to leave the HTTP method unspecified for
|
||||
// a given URL path rule. The wild-card rule is useful for services that provide
|
||||
// content to Web (HTML) clients.
|
||||
type HttpRule struct {
|
||||
// Selects methods to which this rule applies.
|
||||
//
|
||||
// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
|
||||
Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
|
||||
// Determines the URL pattern that is matched by this rule. This pattern can be
|
||||
// used with any of the {get|put|post|delete|patch} methods. A custom method
|
||||
// can be defined using the 'custom' field.
|
||||
//
|
||||
// Types that are valid to be assigned to Pattern:
|
||||
// *HttpRule_Get
|
||||
// *HttpRule_Put
|
||||
// *HttpRule_Post
|
||||
// *HttpRule_Delete
|
||||
// *HttpRule_Patch
|
||||
// *HttpRule_Custom
|
||||
Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"`
|
||||
// The name of the request field whose value is mapped to the HTTP body, or
|
||||
// `*` for mapping all fields not captured by the path pattern to the HTTP
|
||||
// body. NOTE: the referred field must not be a repeated field and must be
|
||||
// present at the top-level of request message type.
|
||||
Body string `protobuf:"bytes,7,opt,name=body" json:"body,omitempty"`
|
||||
// Additional HTTP bindings for the selector. Nested bindings must
|
||||
// not contain an `additional_bindings` field themselves (that is,
|
||||
// the nesting may only be one level deep).
|
||||
AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings" json:"additional_bindings,omitempty"`
|
||||
}
|
||||
|
||||
func (m *HttpRule) Reset() { *m = HttpRule{} }
|
||||
func (m *HttpRule) String() string { return proto.CompactTextString(m) }
|
||||
func (*HttpRule) ProtoMessage() {}
|
||||
func (*HttpRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
|
||||
|
||||
type isHttpRule_Pattern interface {
|
||||
isHttpRule_Pattern()
|
||||
}
|
||||
|
||||
type HttpRule_Get struct {
|
||||
Get string `protobuf:"bytes,2,opt,name=get,oneof"`
|
||||
}
|
||||
type HttpRule_Put struct {
|
||||
Put string `protobuf:"bytes,3,opt,name=put,oneof"`
|
||||
}
|
||||
type HttpRule_Post struct {
|
||||
Post string `protobuf:"bytes,4,opt,name=post,oneof"`
|
||||
}
|
||||
type HttpRule_Delete struct {
|
||||
Delete string `protobuf:"bytes,5,opt,name=delete,oneof"`
|
||||
}
|
||||
type HttpRule_Patch struct {
|
||||
Patch string `protobuf:"bytes,6,opt,name=patch,oneof"`
|
||||
}
|
||||
type HttpRule_Custom struct {
|
||||
Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,oneof"`
|
||||
}
|
||||
|
||||
func (*HttpRule_Get) isHttpRule_Pattern() {}
|
||||
func (*HttpRule_Put) isHttpRule_Pattern() {}
|
||||
func (*HttpRule_Post) isHttpRule_Pattern() {}
|
||||
func (*HttpRule_Delete) isHttpRule_Pattern() {}
|
||||
func (*HttpRule_Patch) isHttpRule_Pattern() {}
|
||||
func (*HttpRule_Custom) isHttpRule_Pattern() {}
|
||||
|
||||
func (m *HttpRule) GetPattern() isHttpRule_Pattern {
|
||||
if m != nil {
|
||||
return m.Pattern
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *HttpRule) GetSelector() string {
|
||||
if m != nil {
|
||||
return m.Selector
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *HttpRule) GetGet() string {
|
||||
if x, ok := m.GetPattern().(*HttpRule_Get); ok {
|
||||
return x.Get
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *HttpRule) GetPut() string {
|
||||
if x, ok := m.GetPattern().(*HttpRule_Put); ok {
|
||||
return x.Put
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *HttpRule) GetPost() string {
|
||||
if x, ok := m.GetPattern().(*HttpRule_Post); ok {
|
||||
return x.Post
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *HttpRule) GetDelete() string {
|
||||
if x, ok := m.GetPattern().(*HttpRule_Delete); ok {
|
||||
return x.Delete
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *HttpRule) GetPatch() string {
|
||||
if x, ok := m.GetPattern().(*HttpRule_Patch); ok {
|
||||
return x.Patch
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *HttpRule) GetCustom() *CustomHttpPattern {
|
||||
if x, ok := m.GetPattern().(*HttpRule_Custom); ok {
|
||||
return x.Custom
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *HttpRule) GetBody() string {
|
||||
if m != nil {
|
||||
return m.Body
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *HttpRule) GetAdditionalBindings() []*HttpRule {
|
||||
if m != nil {
|
||||
return m.AdditionalBindings
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// XXX_OneofFuncs is for the internal use of the proto package.
|
||||
func (*HttpRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
|
||||
return _HttpRule_OneofMarshaler, _HttpRule_OneofUnmarshaler, _HttpRule_OneofSizer, []interface{}{
|
||||
(*HttpRule_Get)(nil),
|
||||
(*HttpRule_Put)(nil),
|
||||
(*HttpRule_Post)(nil),
|
||||
(*HttpRule_Delete)(nil),
|
||||
(*HttpRule_Patch)(nil),
|
||||
(*HttpRule_Custom)(nil),
|
||||
}
|
||||
}
|
||||
|
||||
func _HttpRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
|
||||
m := msg.(*HttpRule)
|
||||
// pattern
|
||||
switch x := m.Pattern.(type) {
|
||||
case *HttpRule_Get:
|
||||
b.EncodeVarint(2<<3 | proto.WireBytes)
|
||||
b.EncodeStringBytes(x.Get)
|
||||
case *HttpRule_Put:
|
||||
b.EncodeVarint(3<<3 | proto.WireBytes)
|
||||
b.EncodeStringBytes(x.Put)
|
||||
case *HttpRule_Post:
|
||||
b.EncodeVarint(4<<3 | proto.WireBytes)
|
||||
b.EncodeStringBytes(x.Post)
|
||||
case *HttpRule_Delete:
|
||||
b.EncodeVarint(5<<3 | proto.WireBytes)
|
||||
b.EncodeStringBytes(x.Delete)
|
||||
case *HttpRule_Patch:
|
||||
b.EncodeVarint(6<<3 | proto.WireBytes)
|
||||
b.EncodeStringBytes(x.Patch)
|
||||
case *HttpRule_Custom:
|
||||
b.EncodeVarint(8<<3 | proto.WireBytes)
|
||||
if err := b.EncodeMessage(x.Custom); err != nil {
|
||||
return err
|
||||
}
|
||||
case nil:
|
||||
default:
|
||||
return fmt.Errorf("HttpRule.Pattern has unexpected type %T", x)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _HttpRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
|
||||
m := msg.(*HttpRule)
|
||||
switch tag {
|
||||
case 2: // pattern.get
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
x, err := b.DecodeStringBytes()
|
||||
m.Pattern = &HttpRule_Get{x}
|
||||
return true, err
|
||||
case 3: // pattern.put
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
x, err := b.DecodeStringBytes()
|
||||
m.Pattern = &HttpRule_Put{x}
|
||||
return true, err
|
||||
case 4: // pattern.post
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
x, err := b.DecodeStringBytes()
|
||||
m.Pattern = &HttpRule_Post{x}
|
||||
return true, err
|
||||
case 5: // pattern.delete
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
x, err := b.DecodeStringBytes()
|
||||
m.Pattern = &HttpRule_Delete{x}
|
||||
return true, err
|
||||
case 6: // pattern.patch
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
x, err := b.DecodeStringBytes()
|
||||
m.Pattern = &HttpRule_Patch{x}
|
||||
return true, err
|
||||
case 8: // pattern.custom
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(CustomHttpPattern)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.Pattern = &HttpRule_Custom{msg}
|
||||
return true, err
|
||||
default:
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
func _HttpRule_OneofSizer(msg proto.Message) (n int) {
|
||||
m := msg.(*HttpRule)
|
||||
// pattern
|
||||
switch x := m.Pattern.(type) {
|
||||
case *HttpRule_Get:
|
||||
n += proto.SizeVarint(2<<3 | proto.WireBytes)
|
||||
n += proto.SizeVarint(uint64(len(x.Get)))
|
||||
n += len(x.Get)
|
||||
case *HttpRule_Put:
|
||||
n += proto.SizeVarint(3<<3 | proto.WireBytes)
|
||||
n += proto.SizeVarint(uint64(len(x.Put)))
|
||||
n += len(x.Put)
|
||||
case *HttpRule_Post:
|
||||
n += proto.SizeVarint(4<<3 | proto.WireBytes)
|
||||
n += proto.SizeVarint(uint64(len(x.Post)))
|
||||
n += len(x.Post)
|
||||
case *HttpRule_Delete:
|
||||
n += proto.SizeVarint(5<<3 | proto.WireBytes)
|
||||
n += proto.SizeVarint(uint64(len(x.Delete)))
|
||||
n += len(x.Delete)
|
||||
case *HttpRule_Patch:
|
||||
n += proto.SizeVarint(6<<3 | proto.WireBytes)
|
||||
n += proto.SizeVarint(uint64(len(x.Patch)))
|
||||
n += len(x.Patch)
|
||||
case *HttpRule_Custom:
|
||||
s := proto.Size(x.Custom)
|
||||
n += proto.SizeVarint(8<<3 | proto.WireBytes)
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case nil:
|
||||
default:
|
||||
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// A custom pattern is used for defining custom HTTP verb.
|
||||
type CustomHttpPattern struct {
|
||||
// The name of this custom HTTP verb.
|
||||
Kind string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"`
|
||||
// The path matched by this custom verb.
|
||||
Path string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"`
|
||||
}
|
||||
|
||||
func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} }
|
||||
func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) }
|
||||
func (*CustomHttpPattern) ProtoMessage() {}
|
||||
func (*CustomHttpPattern) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }
|
||||
|
||||
func (m *CustomHttpPattern) GetKind() string {
|
||||
if m != nil {
|
||||
return m.Kind
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *CustomHttpPattern) GetPath() string {
|
||||
if m != nil {
|
||||
return m.Path
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Http)(nil), "google.api.Http")
|
||||
proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule")
|
||||
proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor1) }
|
||||
|
||||
var fileDescriptor1 = []byte{
|
||||
// 359 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcf, 0x6a, 0xe3, 0x30,
|
||||
0x10, 0xc6, 0xd7, 0x89, 0xe3, 0x24, 0x13, 0x58, 0x58, 0x6d, 0x76, 0x11, 0x85, 0x42, 0xc8, 0x29,
|
||||
0xf4, 0x60, 0x43, 0x7a, 0xe8, 0x21, 0xa7, 0xb8, 0x94, 0xa6, 0xb7, 0xe0, 0x63, 0x2f, 0x45, 0xb1,
|
||||
0x85, 0xa2, 0xd6, 0x91, 0x84, 0x3d, 0x3e, 0xf4, 0x75, 0xfa, 0x0e, 0x7d, 0xb7, 0x1e, 0x8b, 0xfe,
|
||||
0xa4, 0x09, 0x14, 0x7a, 0x9b, 0xef, 0x37, 0x9f, 0x34, 0xa3, 0x19, 0xc1, 0x3f, 0xa1, 0xb5, 0xa8,
|
||||
0x79, 0xc6, 0x8c, 0xcc, 0xf6, 0x88, 0x26, 0x35, 0x8d, 0x46, 0x4d, 0xc0, 0xe3, 0x94, 0x19, 0x39,
|
||||
0x5f, 0x42, 0xbc, 0x41, 0x34, 0xe4, 0x0a, 0x06, 0x4d, 0x57, 0xf3, 0x96, 0x46, 0xb3, 0xfe, 0x62,
|
||||
0xb2, 0x9c, 0xa6, 0x27, 0x4f, 0x6a, 0x0d, 0x45, 0x57, 0xf3, 0xc2, 0x5b, 0xe6, 0xef, 0x3d, 0x18,
|
||||
0x1d, 0x19, 0xb9, 0x80, 0x51, 0xcb, 0x6b, 0x5e, 0xa2, 0x6e, 0x68, 0x34, 0x8b, 0x16, 0xe3, 0xe2,
|
||||
0x4b, 0x13, 0x02, 0x7d, 0xc1, 0x91, 0xf6, 0x2c, 0xde, 0xfc, 0x2a, 0xac, 0xb0, 0xcc, 0x74, 0x48,
|
||||
0xfb, 0x47, 0x66, 0x3a, 0x24, 0x53, 0x88, 0x8d, 0x6e, 0x91, 0xc6, 0x01, 0x3a, 0x45, 0x28, 0x24,
|
||||
0x15, 0xaf, 0x39, 0x72, 0x3a, 0x08, 0x3c, 0x68, 0xf2, 0x1f, 0x06, 0x86, 0x61, 0xb9, 0xa7, 0x49,
|
||||
0x48, 0x78, 0x49, 0x6e, 0x20, 0x29, 0xbb, 0x16, 0xf5, 0x81, 0x8e, 0x66, 0xd1, 0x62, 0xb2, 0xbc,
|
||||
0x3c, 0x7f, 0xc5, 0xad, 0xcb, 0xd8, 0xbe, 0xb7, 0x0c, 0x91, 0x37, 0xca, 0x5e, 0xe8, 0xed, 0x84,
|
||||
0x40, 0xbc, 0xd3, 0xd5, 0x2b, 0x1d, 0xba, 0x07, 0xb8, 0x98, 0xdc, 0xc1, 0x5f, 0x56, 0x55, 0x12,
|
||||
0xa5, 0x56, 0xac, 0x7e, 0xda, 0x49, 0x55, 0x49, 0x25, 0x5a, 0x3a, 0xf9, 0x61, 0x3e, 0xe4, 0x74,
|
||||
0x20, 0x0f, 0xfe, 0x7c, 0x0c, 0x43, 0xe3, 0xeb, 0xcd, 0x57, 0xf0, 0xe7, 0x5b, 0x13, 0xb6, 0xf4,
|
||||
0x8b, 0x54, 0x55, 0x98, 0x9d, 0x8b, 0x2d, 0x33, 0x0c, 0xf7, 0x7e, 0x70, 0x85, 0x8b, 0xf3, 0x67,
|
||||
0xf8, 0x5d, 0xea, 0xc3, 0x59, 0xd9, 0x7c, 0xec, 0xae, 0xb1, 0x1b, 0xdd, 0x46, 0x8f, 0xeb, 0x90,
|
||||
0x10, 0xba, 0x66, 0x4a, 0xa4, 0xba, 0x11, 0x99, 0xe0, 0xca, 0xed, 0x3b, 0xf3, 0x29, 0x66, 0x64,
|
||||
0xeb, 0x7e, 0x02, 0x53, 0x4a, 0x23, 0xb3, 0x6d, 0xb6, 0xab, 0xb3, 0xf8, 0x23, 0x8a, 0xde, 0x7a,
|
||||
0xf1, 0xfd, 0x7a, 0xfb, 0xb0, 0x4b, 0xdc, 0xb9, 0xeb, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x68,
|
||||
0x15, 0x60, 0x5b, 0x40, 0x02, 0x00, 0x00,
|
||||
}
|
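A hedged sketch, not part of the change, that builds the Http and HttpRule messages defined in this file to mirror the mapping described in the comments above; the selectors, paths and body field are placeholders.

// Hypothetical construction of an HTTP config (not part of this change),
// using the oneof Pattern wrappers and the generated getters.
func exampleHTTPConfig() *Http {
	getMessage := &HttpRule{
		Selector: "example.Messaging.GetMessage", // placeholder selector
		Pattern:  &HttpRule_Get{Get: "/v1/messages/{message_id}"},
		AdditionalBindings: []*HttpRule{
			{Pattern: &HttpRule_Get{Get: "/v1/users/{user_id}/messages/{message_id}"}},
		},
	}
	updateMessage := &HttpRule{
		Selector: "example.Messaging.UpdateMessage", // placeholder selector
		Pattern:  &HttpRule_Put{Put: "/v1/messages/{message_id}"},
		Body:     "message", // request field mapped to the HTTP body
	}
	cfg := &Http{Rules: []*HttpRule{getMessage, updateMessage}}
	_ = cfg.GetRules()[1].GetGet() // oneof accessors return "" when that pattern is not set
	return cfg
}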
vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go (generated, vendored, new file, 337 lines)
@@ -0,0 +1,337 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/iam/v1/iam_policy.proto
|
||||
|
||||
/*
|
||||
Package iam is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
google/iam/v1/iam_policy.proto
|
||||
google/iam/v1/policy.proto
|
||||
|
||||
It has these top-level messages:
|
||||
SetIamPolicyRequest
|
||||
GetIamPolicyRequest
|
||||
TestIamPermissionsRequest
|
||||
TestIamPermissionsResponse
|
||||
Policy
|
||||
Binding
|
||||
PolicyDelta
|
||||
BindingDelta
|
||||
*/
|
||||
package iam
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import _ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
|
||||
import (
|
||||
context "golang.org/x/net/context"
|
||||
grpc "google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// Request message for `SetIamPolicy` method.
|
||||
type SetIamPolicyRequest struct {
|
||||
// REQUIRED: The resource for which the policy is being specified.
|
||||
// `resource` is usually specified as a path. For example, a Project
|
||||
// resource is specified as `projects/{project}`.
|
||||
Resource string `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"`
|
||||
// REQUIRED: The complete policy to be applied to the `resource`. The size of
|
||||
// the policy is limited to a few 10s of KB. An empty policy is a
|
||||
// valid policy but certain Cloud Platform services (such as Projects)
|
||||
// might reject them.
|
||||
Policy *Policy `protobuf:"bytes,2,opt,name=policy" json:"policy,omitempty"`
|
||||
}
|
||||
|
||||
func (m *SetIamPolicyRequest) Reset() { *m = SetIamPolicyRequest{} }
|
||||
func (m *SetIamPolicyRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*SetIamPolicyRequest) ProtoMessage() {}
|
||||
func (*SetIamPolicyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
|
||||
func (m *SetIamPolicyRequest) GetResource() string {
|
||||
if m != nil {
|
||||
return m.Resource
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *SetIamPolicyRequest) GetPolicy() *Policy {
|
||||
if m != nil {
|
||||
return m.Policy
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Request message for `GetIamPolicy` method.
type GetIamPolicyRequest struct {
	// REQUIRED: The resource for which the policy is being requested.
	// `resource` is usually specified as a path. For example, a Project
	// resource is specified as `projects/{project}`.
	Resource string `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"`
}

func (m *GetIamPolicyRequest) Reset() { *m = GetIamPolicyRequest{} }
func (m *GetIamPolicyRequest) String() string { return proto.CompactTextString(m) }
func (*GetIamPolicyRequest) ProtoMessage() {}
func (*GetIamPolicyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }

func (m *GetIamPolicyRequest) GetResource() string {
	if m != nil {
		return m.Resource
	}
	return ""
}

// Request message for `TestIamPermissions` method.
type TestIamPermissionsRequest struct {
	// REQUIRED: The resource for which the policy detail is being requested.
	// `resource` is usually specified as a path. For example, a Project
	// resource is specified as `projects/{project}`.
	Resource string `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"`
	// The set of permissions to check for the `resource`. Permissions with
	// wildcards (such as '*' or 'storage.*') are not allowed. For more
	// information see
	// [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).
	Permissions []string `protobuf:"bytes,2,rep,name=permissions" json:"permissions,omitempty"`
}

func (m *TestIamPermissionsRequest) Reset() { *m = TestIamPermissionsRequest{} }
func (m *TestIamPermissionsRequest) String() string { return proto.CompactTextString(m) }
func (*TestIamPermissionsRequest) ProtoMessage() {}
func (*TestIamPermissionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }

func (m *TestIamPermissionsRequest) GetResource() string {
	if m != nil {
		return m.Resource
	}
	return ""
}

func (m *TestIamPermissionsRequest) GetPermissions() []string {
	if m != nil {
		return m.Permissions
	}
	return nil
}

// Response message for `TestIamPermissions` method.
type TestIamPermissionsResponse struct {
	// A subset of `TestPermissionsRequest.permissions` that the caller is
	// allowed.
	Permissions []string `protobuf:"bytes,1,rep,name=permissions" json:"permissions,omitempty"`
}

func (m *TestIamPermissionsResponse) Reset() { *m = TestIamPermissionsResponse{} }
func (m *TestIamPermissionsResponse) String() string { return proto.CompactTextString(m) }
func (*TestIamPermissionsResponse) ProtoMessage() {}
func (*TestIamPermissionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }

func (m *TestIamPermissionsResponse) GetPermissions() []string {
	if m != nil {
		return m.Permissions
	}
	return nil
}

func init() {
	proto.RegisterType((*SetIamPolicyRequest)(nil), "google.iam.v1.SetIamPolicyRequest")
	proto.RegisterType((*GetIamPolicyRequest)(nil), "google.iam.v1.GetIamPolicyRequest")
	proto.RegisterType((*TestIamPermissionsRequest)(nil), "google.iam.v1.TestIamPermissionsRequest")
	proto.RegisterType((*TestIamPermissionsResponse)(nil), "google.iam.v1.TestIamPermissionsResponse")
}

// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4

// Client API for IAMPolicy service

type IAMPolicyClient interface {
	// Sets the access control policy on the specified resource. Replaces any
	// existing policy.
	SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error)
	// Gets the access control policy for a resource.
	// Returns an empty policy if the resource exists and does not have a policy
	// set.
	GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error)
	// Returns permissions that a caller has on the specified resource.
	// If the resource does not exist, this will return an empty set of
	// permissions, not a NOT_FOUND error.
	TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error)
}

type iAMPolicyClient struct {
	cc *grpc.ClientConn
}

func NewIAMPolicyClient(cc *grpc.ClientConn) IAMPolicyClient {
	return &iAMPolicyClient{cc}
}

func (c *iAMPolicyClient) SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) {
	out := new(Policy)
	err := grpc.Invoke(ctx, "/google.iam.v1.IAMPolicy/SetIamPolicy", in, out, c.cc, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *iAMPolicyClient) GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) {
	out := new(Policy)
	err := grpc.Invoke(ctx, "/google.iam.v1.IAMPolicy/GetIamPolicy", in, out, c.cc, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *iAMPolicyClient) TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) {
	out := new(TestIamPermissionsResponse)
	err := grpc.Invoke(ctx, "/google.iam.v1.IAMPolicy/TestIamPermissions", in, out, c.cc, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// Server API for IAMPolicy service

type IAMPolicyServer interface {
	// Sets the access control policy on the specified resource. Replaces any
	// existing policy.
	SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error)
	// Gets the access control policy for a resource.
	// Returns an empty policy if the resource exists and does not have a policy
	// set.
	GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error)
	// Returns permissions that a caller has on the specified resource.
	// If the resource does not exist, this will return an empty set of
	// permissions, not a NOT_FOUND error.
	TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error)
}

func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) {
	s.RegisterService(&_IAMPolicy_serviceDesc, srv)
}

func _IAMPolicy_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(SetIamPolicyRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(IAMPolicyServer).SetIamPolicy(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server: srv,
		FullMethod: "/google.iam.v1.IAMPolicy/SetIamPolicy",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(IAMPolicyServer).SetIamPolicy(ctx, req.(*SetIamPolicyRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _IAMPolicy_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetIamPolicyRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(IAMPolicyServer).GetIamPolicy(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server: srv,
		FullMethod: "/google.iam.v1.IAMPolicy/GetIamPolicy",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(IAMPolicyServer).GetIamPolicy(ctx, req.(*GetIamPolicyRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _IAMPolicy_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(TestIamPermissionsRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(IAMPolicyServer).TestIamPermissions(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server: srv,
		FullMethod: "/google.iam.v1.IAMPolicy/TestIamPermissions",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(IAMPolicyServer).TestIamPermissions(ctx, req.(*TestIamPermissionsRequest))
	}
	return interceptor(ctx, in, info, handler)
}

var _IAMPolicy_serviceDesc = grpc.ServiceDesc{
	ServiceName: "google.iam.v1.IAMPolicy",
	HandlerType: (*IAMPolicyServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "SetIamPolicy",
			Handler: _IAMPolicy_SetIamPolicy_Handler,
		},
		{
			MethodName: "GetIamPolicy",
			Handler: _IAMPolicy_GetIamPolicy_Handler,
		},
		{
			MethodName: "TestIamPermissions",
			Handler: _IAMPolicy_TestIamPermissions_Handler,
		},
	},
	Streams: []grpc.StreamDesc{},
	Metadata: "google/iam/v1/iam_policy.proto",
}

func init() { proto.RegisterFile("google/iam/v1/iam_policy.proto", fileDescriptor0) }

var fileDescriptor0 = []byte{
// 396 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x53, 0xcf, 0x4a, 0xe3, 0x40,
0x18, 0x67, 0x52, 0x28, 0xdb, 0xe9, 0xee, 0xc2, 0xa6, 0x2c, 0xd4, 0x20, 0x25, 0x8c, 0x1e, 0xd2,
0x80, 0x13, 0x53, 0x6f, 0x15, 0x05, 0xeb, 0x21, 0xf4, 0x20, 0x94, 0x2a, 0x82, 0x5e, 0x74, 0xac,
0x43, 0x18, 0x48, 0x32, 0x31, 0x33, 0x2d, 0x88, 0x78, 0xf1, 0x15, 0xf4, 0xe4, 0x23, 0xf8, 0x3a,
0xbe, 0x82, 0x0f, 0xe1, 0x51, 0x92, 0x89, 0x35, 0x6d, 0xaa, 0x54, 0xf0, 0x54, 0x3a, 0xf3, 0xfb,
0xf7, 0xfd, 0xbe, 0x0c, 0x6c, 0xf9, 0x9c, 0xfb, 0x01, 0x75, 0x18, 0x09, 0x9d, 0x89, 0x9b, 0xfe,
0x9c, 0xc5, 0x3c, 0x60, 0xa3, 0x6b, 0x1c, 0x27, 0x5c, 0x72, 0xfd, 0x8f, 0xba, 0xc7, 0x8c, 0x84,
0x78, 0xe2, 0x1a, 0xab, 0x39, 0x9c, 0xc4, 0xcc, 0x21, 0x51, 0xc4, 0x25, 0x91, 0x8c, 0x47, 0x42,
0x81, 0x0d, 0x63, 0x56, 0xac, 0x28, 0x84, 0xce, 0x61, 0xe3, 0x90, 0xca, 0x3e, 0x09, 0x07, 0xd9,
0xe9, 0x90, 0x5e, 0x8d, 0xa9, 0x90, 0xba, 0x01, 0x7f, 0x25, 0x54, 0xf0, 0x71, 0x32, 0xa2, 0x4d,
0x60, 0x02, 0xab, 0x36, 0x9c, 0xfe, 0xd7, 0x37, 0x60, 0x55, 0x49, 0x34, 0x35, 0x13, 0x58, 0xf5,
0xce, 0x7f, 0x3c, 0x13, 0x06, 0xe7, 0x4a, 0x39, 0x08, 0xb9, 0xb0, 0xe1, 0x7d, 0xcf, 0x01, 0x9d,
0xc0, 0x95, 0x23, 0x2a, 0x32, 0x0e, 0x4d, 0x42, 0x26, 0x44, 0x3a, 0xcc, 0x32, 0xd1, 0x4c, 0x58,
0x8f, 0x3f, 0x18, 0x4d, 0xcd, 0xac, 0x58, 0xb5, 0x61, 0xf1, 0x08, 0xed, 0x42, 0x63, 0x91, 0xb4,
0x88, 0x79, 0x24, 0x4a, 0x7c, 0x50, 0xe2, 0x77, 0x1e, 0x2a, 0xb0, 0xd6, 0xdf, 0x3b, 0x50, 0xb3,
0xe8, 0x12, 0xfe, 0x2e, 0xb6, 0xa7, 0xa3, 0xb9, 0x2a, 0x16, 0x54, 0x6b, 0x2c, 0xae, 0x0b, 0xb5,
0xef, 0x9e, 0x5f, 0xee, 0xb5, 0x35, 0xd4, 0x4a, 0x57, 0x74, 0xf3, 0x3e, 0xd1, 0x8e, 0x6d, 0xdf,
0x76, 0x45, 0x41, 0xa5, 0x0b, 0xec, 0xd4, 0xd5, 0xfb, 0xca, 0xd5, 0xfb, 0x11, 0x57, 0x7f, 0xce,
0xf5, 0x11, 0x40, 0xbd, 0x5c, 0x9d, 0x6e, 0xcd, 0x09, 0x7f, 0xba, 0x38, 0xa3, 0xbd, 0x04, 0x52,
0xed, 0x01, 0x39, 0x59, 0xac, 0x36, 0x5a, 0x2f, 0xc7, 0x92, 0x25, 0x56, 0x17, 0xd8, 0xbd, 0x18,
0xfe, 0x1b, 0xf1, 0x70, 0xd6, 0xa0, 0xf7, 0x77, 0x9a, 0x7f, 0x90, 0x7e, 0xeb, 0x03, 0x70, 0xba,
0x99, 0x03, 0x7c, 0x1e, 0x90, 0xc8, 0xc7, 0x3c, 0xf1, 0x1d, 0x9f, 0x46, 0xd9, 0x4b, 0x70, 0xd4,
0x15, 0x89, 0x99, 0xc8, 0x1f, 0xca, 0x36, 0x23, 0xe1, 0x2b, 0x00, 0x4f, 0x5a, 0xc3, 0x53, 0xac,
0xfd, 0x80, 0x8f, 0x2f, 0x71, 0x9f, 0x84, 0xf8, 0xd8, 0xbd, 0xa8, 0x66, 0xac, 0xad, 0xb7, 0x00,
0x00, 0x00, 0xff, 0xff, 0x6c, 0x3a, 0x2b, 0x4d, 0xaa, 0x03, 0x00, 0x00,
}
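The `IAMPolicyClient` generated above is plain gRPC plumbing around the three IAM methods. As a rough illustration of how such a client could be driven (not taken from this commit; the endpoint, resource name, and permission strings below are placeholders, and a real Google endpoint would additionally need TLS and credentials):

```go
package main

import (
	"context"
	"fmt"
	"log"

	iam "google.golang.org/genproto/googleapis/iam/v1"
	"google.golang.org/grpc"
)

func main() {
	// Placeholder endpoint: assumes some service implementing
	// google.iam.v1.IAMPolicy is reachable here.
	conn, err := grpc.Dial("localhost:8787", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := iam.NewIAMPolicyClient(conn)

	// Ask which of the listed permissions the caller holds on the resource.
	resp, err := client.TestIamPermissions(context.Background(), &iam.TestIamPermissionsRequest{
		Resource:    "projects/my-project",
		Permissions: []string{"storage.buckets.get", "storage.objects.list"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("granted:", resp.GetPermissions())
}
```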
@ -0,0 +1,269 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/iam/v1/policy.proto

package iam

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// The type of action performed on a Binding in a policy.
type BindingDelta_Action int32

const (
	// Unspecified.
	BindingDelta_ACTION_UNSPECIFIED BindingDelta_Action = 0
	// Addition of a Binding.
	BindingDelta_ADD BindingDelta_Action = 1
	// Removal of a Binding.
	BindingDelta_REMOVE BindingDelta_Action = 2
)

var BindingDelta_Action_name = map[int32]string{
	0: "ACTION_UNSPECIFIED",
	1: "ADD",
	2: "REMOVE",
}
var BindingDelta_Action_value = map[string]int32{
	"ACTION_UNSPECIFIED": 0,
	"ADD": 1,
	"REMOVE": 2,
}

func (x BindingDelta_Action) String() string {
	return proto.EnumName(BindingDelta_Action_name, int32(x))
}
func (BindingDelta_Action) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{3, 0} }

// Defines an Identity and Access Management (IAM) policy. It is used to
// specify access control policies for Cloud Platform resources.
//
//
// A `Policy` consists of a list of `bindings`. A `Binding` binds a list of
// `members` to a `role`, where the members can be user accounts, Google groups,
// Google domains, and service accounts. A `role` is a named list of permissions
// defined by IAM.
//
// **Example**
//
// {
// "bindings": [
// {
// "role": "roles/owner",
// "members": [
// "user:mike@example.com",
// "group:admins@example.com",
// "domain:google.com",
// "serviceAccount:my-other-app@appspot.gserviceaccount.com",
// ]
// },
// {
// "role": "roles/viewer",
// "members": ["user:sean@example.com"]
// }
// ]
// }
//
// For a description of IAM and its features, see the
// [IAM developer's guide](https://cloud.google.com/iam).
type Policy struct {
	// Version of the `Policy`. The default version is 0.
	Version int32 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"`
	// Associates a list of `members` to a `role`.
	// Multiple `bindings` must not be specified for the same `role`.
	// `bindings` with no members will result in an error.
	Bindings []*Binding `protobuf:"bytes,4,rep,name=bindings" json:"bindings,omitempty"`
	// `etag` is used for optimistic concurrency control as a way to help
	// prevent simultaneous updates of a policy from overwriting each other.
	// It is strongly suggested that systems make use of the `etag` in the
	// read-modify-write cycle to perform policy updates in order to avoid race
	// conditions: An `etag` is returned in the response to `getIamPolicy`, and
	// systems are expected to put that etag in the request to `setIamPolicy` to
	// ensure that their change will be applied to the same version of the policy.
	//
	// If no `etag` is provided in the call to `setIamPolicy`, then the existing
	// policy is overwritten blindly.
	Etag []byte `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"`
}

func (m *Policy) Reset() { *m = Policy{} }
func (m *Policy) String() string { return proto.CompactTextString(m) }
func (*Policy) ProtoMessage() {}
func (*Policy) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }

func (m *Policy) GetVersion() int32 {
	if m != nil {
		return m.Version
	}
	return 0
}

func (m *Policy) GetBindings() []*Binding {
	if m != nil {
		return m.Bindings
	}
	return nil
}

func (m *Policy) GetEtag() []byte {
	if m != nil {
		return m.Etag
	}
	return nil
}

// Associates `members` with a `role`.
type Binding struct {
	// Role that is assigned to `members`.
	// For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
	// Required
	Role string `protobuf:"bytes,1,opt,name=role" json:"role,omitempty"`
	// Specifies the identities requesting access for a Cloud Platform resource.
	// `members` can have the following values:
	//
	// * `allUsers`: A special identifier that represents anyone who is
	//    on the internet; with or without a Google account.
	//
	// * `allAuthenticatedUsers`: A special identifier that represents anyone
	//    who is authenticated with a Google account or a service account.
	//
	// * `user:{emailid}`: An email address that represents a specific Google
	//    account. For example, `alice@gmail.com` or `joe@example.com`.
	//
	//
	// * `serviceAccount:{emailid}`: An email address that represents a service
	//    account. For example, `my-other-app@appspot.gserviceaccount.com`.
	//
	// * `group:{emailid}`: An email address that represents a Google group.
	//    For example, `admins@example.com`.
	//
	// * `domain:{domain}`: A Google Apps domain name that represents all the
	//    users of that domain. For example, `google.com` or `example.com`.
	//
	//
	Members []string `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"`
}

func (m *Binding) Reset() { *m = Binding{} }
func (m *Binding) String() string { return proto.CompactTextString(m) }
func (*Binding) ProtoMessage() {}
func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }

func (m *Binding) GetRole() string {
	if m != nil {
		return m.Role
	}
	return ""
}

func (m *Binding) GetMembers() []string {
	if m != nil {
		return m.Members
	}
	return nil
}

// The difference delta between two policies.
type PolicyDelta struct {
	// The delta for Bindings between two policies.
	BindingDeltas []*BindingDelta `protobuf:"bytes,1,rep,name=binding_deltas,json=bindingDeltas" json:"binding_deltas,omitempty"`
}

func (m *PolicyDelta) Reset() { *m = PolicyDelta{} }
func (m *PolicyDelta) String() string { return proto.CompactTextString(m) }
func (*PolicyDelta) ProtoMessage() {}
func (*PolicyDelta) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }

func (m *PolicyDelta) GetBindingDeltas() []*BindingDelta {
	if m != nil {
		return m.BindingDeltas
	}
	return nil
}

// One delta entry for Binding. Each individual change (only one member in each
// entry) to a binding will be a separate entry.
type BindingDelta struct {
	// The action that was performed on a Binding.
	// Required
	Action BindingDelta_Action `protobuf:"varint,1,opt,name=action,enum=google.iam.v1.BindingDelta_Action" json:"action,omitempty"`
	// Role that is assigned to `members`.
	// For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
	// Required
	Role string `protobuf:"bytes,2,opt,name=role" json:"role,omitempty"`
	// A single identity requesting access for a Cloud Platform resource.
	// Follows the same format of Binding.members.
	// Required
	Member string `protobuf:"bytes,3,opt,name=member" json:"member,omitempty"`
}

func (m *BindingDelta) Reset() { *m = BindingDelta{} }
func (m *BindingDelta) String() string { return proto.CompactTextString(m) }
func (*BindingDelta) ProtoMessage() {}
func (*BindingDelta) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} }

func (m *BindingDelta) GetAction() BindingDelta_Action {
	if m != nil {
		return m.Action
	}
	return BindingDelta_ACTION_UNSPECIFIED
}

func (m *BindingDelta) GetRole() string {
	if m != nil {
		return m.Role
	}
	return ""
}

func (m *BindingDelta) GetMember() string {
	if m != nil {
		return m.Member
	}
	return ""
}

func init() {
	proto.RegisterType((*Policy)(nil), "google.iam.v1.Policy")
	proto.RegisterType((*Binding)(nil), "google.iam.v1.Binding")
	proto.RegisterType((*PolicyDelta)(nil), "google.iam.v1.PolicyDelta")
	proto.RegisterType((*BindingDelta)(nil), "google.iam.v1.BindingDelta")
	proto.RegisterEnum("google.iam.v1.BindingDelta_Action", BindingDelta_Action_name, BindingDelta_Action_value)
}

func init() { proto.RegisterFile("google/iam/v1/policy.proto", fileDescriptor1) }

var fileDescriptor1 = []byte{
// 387 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0x4d, 0x8f, 0xd3, 0x30,
0x10, 0xc5, 0xed, 0x92, 0xd2, 0xd9, 0x0f, 0x15, 0x23, 0x55, 0xd1, 0xc2, 0xa1, 0xca, 0x29, 0x27,
0x87, 0x16, 0x21, 0x24, 0x38, 0x35, 0x4d, 0x40, 0x39, 0xb0, 0x1b, 0x0c, 0xec, 0x81, 0xcb, 0xca,
0x69, 0x2d, 0xcb, 0x28, 0xb6, 0xa3, 0x24, 0x54, 0xe2, 0x2f, 0x21, 0xf1, 0xff, 0x38, 0xa2, 0xd8,
0xee, 0xaa, 0x95, 0x10, 0xb7, 0x79, 0x79, 0xef, 0x65, 0xde, 0xcc, 0x18, 0xae, 0x85, 0x31, 0xa2,
0xe6, 0x89, 0x64, 0x2a, 0xd9, 0x2f, 0x93, 0xc6, 0xd4, 0x72, 0xfb, 0x93, 0x34, 0xad, 0xe9, 0x0d,
0xbe, 0x74, 0x1c, 0x91, 0x4c, 0x91, 0xfd, 0xf2, 0xfa, 0x85, 0x97, 0xb2, 0x46, 0x26, 0x4c, 0x6b,
0xd3, 0xb3, 0x5e, 0x1a, 0xdd, 0x39, 0x71, 0xf4, 0x1d, 0x82, 0xd2, 0x9a, 0x71, 0x08, 0x93, 0x3d,
0x6f, 0x3b, 0x69, 0x74, 0x88, 0x16, 0x28, 0x7e, 0x4c, 0x0f, 0x10, 0xaf, 0xe0, 0x49, 0x25, 0xf5,
0x4e, 0x6a, 0xd1, 0x85, 0x67, 0x8b, 0x71, 0x7c, 0xbe, 0x9a, 0x93, 0x93, 0x1e, 0x24, 0x75, 0x34,
0x7d, 0xd0, 0x61, 0x0c, 0x67, 0xbc, 0x67, 0x22, 0x1c, 0x2f, 0x50, 0x7c, 0x41, 0x6d, 0x1d, 0xbd,
0x81, 0x89, 0x17, 0x0e, 0x74, 0x6b, 0x6a, 0x6e, 0x3b, 0x4d, 0xa9, 0xad, 0x87, 0x00, 0x8a, 0xab,
0x8a, 0xb7, 0x5d, 0x38, 0x5a, 0x8c, 0xe3, 0x29, 0x3d, 0xc0, 0xe8, 0x13, 0x9c, 0xbb, 0x90, 0x19,
0xaf, 0x7b, 0x86, 0x53, 0xb8, 0xf2, 0x7d, 0xee, 0x77, 0xc3, 0x87, 0x2e, 0x44, 0x36, 0xd5, 0xf3,
0x7f, 0xa7, 0xb2, 0x26, 0x7a, 0x59, 0x1d, 0xa1, 0x2e, 0xfa, 0x8d, 0xe0, 0xe2, 0x98, 0xc7, 0x6f,
0x21, 0x60, 0xdb, 0xfe, 0x30, 0xfd, 0xd5, 0x2a, 0xfa, 0xcf, 0xcf, 0xc8, 0xda, 0x2a, 0xa9, 0x77,
0x3c, 0x4c, 0x33, 0x3a, 0x9a, 0x66, 0x0e, 0x81, 0x8b, 0x6f, 0x57, 0x30, 0xa5, 0x1e, 0x45, 0xaf,
0x21, 0x70, 0x6e, 0x3c, 0x07, 0xbc, 0xde, 0x7c, 0x29, 0x6e, 0x6f, 0xee, 0xbf, 0xde, 0x7c, 0x2e,
0xf3, 0x4d, 0xf1, 0xbe, 0xc8, 0xb3, 0xd9, 0x23, 0x3c, 0x81, 0xf1, 0x3a, 0xcb, 0x66, 0x08, 0x03,
0x04, 0x34, 0xff, 0x78, 0x7b, 0x97, 0xcf, 0x46, 0xa9, 0x82, 0xa7, 0x5b, 0xa3, 0x4e, 0x33, 0xa5,
0x7e, 0x2b, 0xe5, 0x70, 0xc9, 0x12, 0x7d, 0x7b, 0xe9, 0x59, 0x61, 0x6a, 0xa6, 0x05, 0x31, 0xad,
0x48, 0x04, 0xd7, 0xf6, 0xce, 0x89, 0xa3, 0x58, 0x23, 0x3b, 0xff, 0x66, 0xde, 0x49, 0xa6, 0xfe,
0x20, 0xf4, 0x6b, 0xf4, 0xec, 0x83, 0x73, 0x6d, 0x6a, 0xf3, 0x63, 0x47, 0x0a, 0xa6, 0xc8, 0xdd,
0xb2, 0x0a, 0xac, 0xeb, 0xd5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x4a, 0x85, 0x10, 0x68,
0x02, 0x00, 0x00,
}
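For orientation, the `Policy` and `Binding` types generated above map one-to-one onto the JSON example in the `Policy` doc comment. A minimal sketch that builds that same policy in Go (the roles and e-mail addresses are the placeholder values from the comment, not anything this change uses):

```go
package main

import (
	"fmt"

	iam "google.golang.org/genproto/googleapis/iam/v1"
)

func main() {
	// Construct the policy shown in the Policy doc comment's JSON example.
	p := &iam.Policy{
		Bindings: []*iam.Binding{
			{
				Role: "roles/owner",
				Members: []string{
					"user:mike@example.com",
					"group:admins@example.com",
					"domain:google.com",
					"serviceAccount:my-other-app@appspot.gserviceaccount.com",
				},
			},
			{
				Role:    "roles/viewer",
				Members: []string{"user:sean@example.com"},
			},
		},
	}

	// The generated getters are nil-safe, so they can be called on any value.
	fmt.Println(p.GetVersion(), len(p.GetBindings())) // 0 2
}
```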
@ -8,12 +8,36 @@
"revision": "81b7822b1e798e8f17bf64b59512a5be4097e966",
"revisionTime": "2017-01-18T16:13:56Z"
},
{
"checksumSHA1": "/ixPd+hSgsbAjBI/fPqmHtTFRM8=",
"path": "cloud.google.com/go/iam",
"revision": "eaddaf6dd7ee35fd3c2420c8d27478db176b0485",
"revisionTime": "2017-10-03T12:23:38Z"
},
{
"checksumSHA1": "4iounbuF7SMZdx/MlKSUuhnV848=",
"path": "cloud.google.com/go/internal",
"revision": "81b7822b1e798e8f17bf64b59512a5be4097e966",
"revisionTime": "2017-01-18T16:13:56Z"
},
{
"checksumSHA1": "MCns2LLZtUZEx6JWyYBrcbSuTXg=",
"path": "cloud.google.com/go/internal/optional",
"revision": "eaddaf6dd7ee35fd3c2420c8d27478db176b0485",
"revisionTime": "2017-10-03T12:23:38Z"
},
{
"checksumSHA1": "QXE70x1YpmwfX8bqcncO5LxjeEA=",
"path": "cloud.google.com/go/internal/version",
"revision": "eaddaf6dd7ee35fd3c2420c8d27478db176b0485",
"revisionTime": "2017-10-03T12:23:38Z"
},
{
"checksumSHA1": "T1qOkeLqtHSFaUsekS+scNweNO4=",
"path": "cloud.google.com/go/storage",
"revision": "eaddaf6dd7ee35fd3c2420c8d27478db176b0485",
"revisionTime": "2017-10-03T12:23:38Z"
},
{
"checksumSHA1": "mD5cAEaOLqhUeaFHbE8CLkZwM0M=",
"path": "github.com/Azure/azure-sdk-for-go/arm/resources/resources",

@ -1040,6 +1064,12 @@
"revision": "8ee79997227bf9b34611aee7946ae64735e6fd93",
"revisionTime": "2016-11-17T03:31:26Z"
},
{
"checksumSHA1": "XNHQiRltA7NQJV0RvUroY+cf+zg=",
"path": "github.com/golang/protobuf/protoc-gen-go/descriptor",
"revision": "130e6b02ab059e7b717a096f397c5b60111cae74",
"revisionTime": "2017-09-20T22:06:47Z"
},
{
"checksumSHA1": "5UJZd7Zyo40vk1OjMTy6LWjTcss=",
"path": "github.com/golang/protobuf/ptypes",

@ -2238,10 +2268,10 @@
"revisionTime": "2016-12-14T09:25:55Z"
},
{
"checksumSHA1": "rEzA1cW2NdfF9ndGQHTNzE5+mF4=",
"checksumSHA1": "JTBn9MQUhwHtjwv7rC9Zg4KRN7g=",
"path": "golang.org/x/oauth2/google",
"revision": "314dd2c0bf3ebd592ec0d20847d27e79d0dbe8dd",
"revisionTime": "2016-12-14T09:25:55Z"
"revision": "bb50c06baba3d0c76f9d125c0719093e315b5b44",
"revisionTime": "2017-09-28T00:25:42Z"
},
{
"checksumSHA1": "2/1PJ6nxegIPdmFoqugDSlpjEfQ=",

@ -2401,10 +2431,10 @@
"revisionTime": "2017-09-13T19:45:57Z"
},
{
"checksumSHA1": "C7k1pbU/WU4CBoBwA4EBUnV/iek=",
"checksumSHA1": "/y0saWnM+kTnSvZrNlvoNOgj0Uo=",
"path": "google.golang.org/api/gensupport",
"revision": "64485db7e8c8be51e572801d06cdbcfadd3546c1",
"revisionTime": "2017-02-23T23:41:36Z"
"revision": "7a7376eff6a51c6a053fcf8e9e50bf01a20f2673",
"revisionTime": "2017-10-05T00:03:05Z"
},
{
"checksumSHA1": "yQREK/OWrz9PLljbr127+xFk6J0=",

@ -2419,10 +2449,40 @@
"revisionTime": "2016-08-05T04:28:55Z"
},
{
"checksumSHA1": "xygm9BwoCg7vc0PPgAPdxNKJ38c=",
"checksumSHA1": "Mr2fXhMRzlQCgANFm91s536pG7E=",
"path": "google.golang.org/api/googleapi/transport",
"revision": "7a7376eff6a51c6a053fcf8e9e50bf01a20f2673",
"revisionTime": "2017-10-05T00:03:05Z"
},
{
"checksumSHA1": "dENAVft6XToomTHrm5J2zFt4hgU=",
"path": "google.golang.org/api/internal",
"revision": "7a7376eff6a51c6a053fcf8e9e50bf01a20f2673",
"revisionTime": "2017-10-05T00:03:05Z"
},
{
"checksumSHA1": "slcGOTGSdukEPPSN81Q5WZGmhog=",
"path": "google.golang.org/api/iterator",
"revision": "7a7376eff6a51c6a053fcf8e9e50bf01a20f2673",
"revisionTime": "2017-10-05T00:03:05Z"
},
{
"checksumSHA1": "Y3CG3ZFIYfF6AhvpiBMBAGcZMV4=",
"path": "google.golang.org/api/option",
"revision": "7a7376eff6a51c6a053fcf8e9e50bf01a20f2673",
"revisionTime": "2017-10-05T00:03:05Z"
},
{
"checksumSHA1": "YrusV0OMylNg/6vMjCy51PzIryo=",
"path": "google.golang.org/api/storage/v1",
"revision": "3cc2e591b550923a2c5f0ab5a803feda924d5823",
"revisionTime": "2016-11-27T23:54:21Z"
"revision": "7a7376eff6a51c6a053fcf8e9e50bf01a20f2673",
"revisionTime": "2017-10-05T00:03:05Z"
},
{
"checksumSHA1": "gZqIfbw6I/Cmw/+M278M2E7JzsU=",
"path": "google.golang.org/api/transport/http",
"revision": "7a7376eff6a51c6a053fcf8e9e50bf01a20f2673",
"revisionTime": "2017-10-05T00:03:05Z"
},
{
"checksumSHA1": "NU7Al7Ud5MQZxti3Pv6YgVrzLrM=",

@ -2464,6 +2524,18 @@
"path": "google.golang.org/appengine/internal/remote_api",
"revision": "b667a5000b082e49c6c6d16867d376a12e9490cd"
},
{
"checksumSHA1": "B22iMMY2vi1Q9kseWb/ZznpW8lQ=",
"path": "google.golang.org/genproto/googleapis/api/annotations",
"revision": "f676e0f3ac6395ff1a529ae59a6670878a8371a6",
"revisionTime": "2017-10-02T23:26:14Z"
},
{
"checksumSHA1": "m5IWVQJ4fVYc3b+5OrZ7BdNlvkA=",
"path": "google.golang.org/genproto/googleapis/iam/v1",
"revision": "f676e0f3ac6395ff1a529ae59a6670878a8371a6",
"revisionTime": "2017-10-02T23:26:14Z"
},
{
"checksumSHA1": "AvVpgwhxhJgjoSledwDtYrEKVE4=",
"path": "google.golang.org/genproto/googleapis/rpc/status",
@ -8,9 +8,9 @@ description: |-

# gcs

**Kind: Standard (with no locking)**
**Kind: Standard (with locking)**

Stores the state as a given key in a given bucket on [Google Cloud Storage](https://cloud.google.com/storage/).
Stores the state as an object in a configurable prefix and bucket on [Google Cloud Storage](https://cloud.google.com/storage/) (GCS).

## Example Configuration

@ -18,8 +18,7 @@ Stores the state as a given key in a given bucket on [Google Cloud Storage](http
terraform {
  backend "gcs" {
    bucket  = "tf-state-prod"
    path    = "path/terraform.tfstate"
    project = "myproject"
    prefix  = "terraform/state"
  }
}
```

@ -30,9 +29,8 @@ terraform {
data "terraform_remote_state" "foo" {
  backend = "gcs"
  config {
    bucket  = "terraform-state-prod"
    path    = "network/terraform.tfstate"
    project = "goopro"
    bucket  = "terraform-state"
    prefix  = "prod"
  }
}

@ -49,6 +47,15 @@ resource "template_file" "bar" {

The following configuration options are supported:

* `bucket` - (Required) The name of the GCS bucket
* `path` - (Required) The path where to place/look for state file inside the bucket
* `credentials` / `GOOGLE_CREDENTIALS` - (Required) Google Cloud Platform account credentials in json format
* `bucket` - (Required) The name of the GCS bucket.
  This name must be globally unique.
  For more information, see [Bucket Naming Guidelines](https://cloud.google.com/storage/docs/bucketnaming.html#requirements).
* `credentials` / `GOOGLE_CREDENTIALS` - (Optional) Local path to Google Cloud Platform account credentials in JSON format.
  If unset, [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials) are used.
  The provided credentials need to have the `devstorage.read_write` scope and `WRITER` permissions on the bucket.
* `prefix` - (Optional) GCS prefix inside the bucket. Named states are stored in an object called `<prefix>/<name>.tfstate`.
* `path` - (Deprecated) GCS path to the state file of the default state. For backwards compatibility only, use `prefix` instead.
* `project` / `GOOGLE_PROJECT` - (Optional) The project ID to which the bucket belongs. This is only used when creating a new bucket during initialization.
  Since buckets have globally unique names, the project ID is not required to access the bucket during normal operation.
* `region` / `GOOGLE_REGION` - (Optional) The region in which a new bucket is created.
  For more information, see [Bucket Locations](https://cloud.google.com/storage/docs/bucket-locations).
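As an illustration of the new option set above (the bucket name, key file path, project ID, and region are placeholders, not values the backend requires), a fuller configuration might look like:

```
terraform {
  backend "gcs" {
    bucket      = "tf-state-prod"       # must be globally unique
    prefix      = "terraform/state"     # states written to terraform/state/<name>.tfstate
    credentials = "account.json"        # optional; Application Default Credentials used if unset
    project     = "my-project"          # only needed if the bucket has to be created
    region      = "us-central1"         # only used when creating the bucket
  }
}
```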