diff --git a/.travis.yml b/.travis.yml
index 04cc6f309..1c60d74d5 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,7 +2,7 @@ dist: trusty
sudo: false
language: go
go:
-- 1.8
+- 1.8.x
# add TF_CONSUL_TEST=1 to run consul tests
# they were causing timeouts in travis
@@ -25,7 +25,7 @@ install:
- bash scripts/gogetcookie.sh
- go get github.com/kardianos/govendor
script:
-- make vet vendor-status test
+- make vendor-status test vet
- GOOS=windows go build
branches:
only:
@@ -39,4 +39,4 @@ notifications:
matrix:
fast_finish: true
allow_failures:
- - go: tip
+ - go: tip
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 595974989..856b8d374 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,16 +3,57 @@
BACKWARDS INCOMPATIBILITIES / NOTES:
* provider/aws: Users of aws_cloudfront_distributions with custom_origins have been broken due to changes in the AWS API requiring `OriginReadTimeout` to be set for updates. This has been fixed and will show as a change in terraform plan / apply. [GH-13367]
+* provider/aws: Users of the China and Gov clouds cannot use the new tagging of volumes created as part of aws_instances [GH-14055]
FEATURES:
- * **New Provider:** `gitlab` [GH-13898]
+* **New Provider:** `gitlab` [GH-13898]
+* **New Resource:** `aws_emr_security_configuration` [GH-14080]
+* **New Resource:** `azurerm_sql_elasticpool` [GH-14099]
+* **New Resource:** `google_compute_backend_bucket` [GH-14015]
+* **New Resource:** `google_compute_snapshot` [GH-12482]
+* **New Resource:** `heroku_app_feature` [GH-14035]
+* **New Resource:** `heroku_pipeline` [GH-14078]
+* **New Resource:** `heroku_pipeline_coupling` [GH-14078]
+* **New Resource:** `vault_auth_backend` [GH-10988]
+* **New Data Source:** `aws_efs_file_system` [GH-14041]
IMPROVEMENTS:
+* core: Add `sha512` and `base64sha512` interpolation functions, analogous to their `sha256` equivalents [GH-14100]
* provider/aws: Add support for CustomOrigin timeouts to aws_cloudfront_distribution [GH-13367]
+* provider/aws: Add support for IAMDatabaseAuthenticationEnabled [GH-14092]
+* provider/aws: Add support for TimeToLive to `aws_dynamodb_table` [GH-14104]
+* provider/aws: Add `security_configuration` support to `aws_emr_cluster` [GH-14133]
+* provider/aws: Add support for the tenancy placement option in `aws_spot_fleet_request` [GH-14163]
+* provider/azurerm: `azurerm_template_deployment` now supports String/Int/Boolean outputs [GH-13670]
+* provider/azurerm: Expose the Private IP Address for a Load Balancer, if available [GH-13965]
* provider/dnsimple: Add support for import for dnsimple_records [GH-9130]
+* provider/google: Add support for networkIP in compute instance templates [GH-13515]
+* provider/google: google_dns_managed_zone is now importable [GH-13824]
+* provider/nomad: Add TLS options [GH-13956]
* provider/triton: Add support for reading provider configuration from `TRITON_*` environment variables in addition to `SDC_*` [GH-14000]
+* provider/triton: Add `cloud_config` argument to `triton_machine` resources for Linux containers [GH-12840]
+* provider/triton: Add `insecure_skip_tls_verify` [GH-14077]
+
+BUG FIXES:
+
+* core: `module` blocks without names are now caught in validation, along with various other block types [GH-14162]
+* provider/aws: Update aws_ebs_volume when attached [GH-14005]
+* provider/aws: Set aws_instance volume_tags to be Computed [GH-14007]
+* provider/aws: Fix issue getting partition for federated users [GH-13992]
+* provider/aws: Don't force a new `aws_spot_instance_request` when `volume_tags` change [GH-14046]
+* provider/aws: Exclude aws_instance volume tagging for China and Gov Clouds [GH-14055]
+* provider/aws: Fix source_dest_check with network_interface [GH-14079]
+* provider/aws: Fix the bug where the SNS delivery policy was always recreated [GH-14064]
+* provider/digitalocean: Prevent diffs when using IDs of images instead of slugs [GH-13879]
+* provider/fastly: Change setting conditionals to optional [GH-14103]
+* provider/google: Ignore certain project services that can't be enabled directly via the api [GH-13730]
+* provider/google: Ability to add more than 25 project services [GH-13758]
+* provider/google: Fix compute instance panic with bad disk config [GH-14169]
+* provider/heroku: Configure buildpacks correctly for both Org Apps and non-org Apps [GH-13990]
+* provider/postgresql: Grant role when creating database [GH-11452]
+* provisioner/remote-exec: Fix panic from remote_exec provisioner [GH-14134]
## 0.9.4 (26th April 2017)
diff --git a/Makefile b/Makefile
index 319492ef1..0eba369dc 100644
--- a/Makefile
+++ b/Makefile
@@ -75,8 +75,8 @@ cover:
# vet runs the Go source code static analysis tool `vet` to find
# any common errors.
vet:
- @echo "go vet ."
- @go vet $$(go list ./... | grep -v vendor/) ; if [ $$? -eq 1 ]; then \
+ @echo 'go vet $$(go list ./... | grep -v /terraform/vendor/)'
+ @go vet $$(go list ./... | grep -v /terraform/vendor/) ; if [ $$? -eq 1 ]; then \
echo ""; \
echo "Vet found suspicious constructs. Please check the reported constructs"; \
echo "and fix them if necessary before submitting the code for review."; \
diff --git a/Vagrantfile b/Vagrantfile
index f59618e3b..368331868 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -5,7 +5,7 @@
VAGRANTFILE_API_VERSION = "2"
# Software version variables
-GOVERSION = "1.8"
+GOVERSION = "1.8.1"
UBUNTUVERSION = "16.04"
# CPU and RAM can be adjusted depending on your system
diff --git a/builtin/providers/aws/auth_helpers.go b/builtin/providers/aws/auth_helpers.go
index 1a73c6e8b..e808d4d39 100644
--- a/builtin/providers/aws/auth_helpers.go
+++ b/builtin/providers/aws/auth_helpers.go
@@ -54,7 +54,7 @@ func GetAccountInfo(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string)
awsErr, ok := err.(awserr.Error)
- // AccessDenied and ValidationError can be raised
- // if credentials belong to federated profile, so we ignore these
+ // AccessDenied, ValidationError and InvalidClientTokenId can be raised
+ // if the credentials belong to a federated profile, so we ignore these
- if !ok || (awsErr.Code() != "AccessDenied" && awsErr.Code() != "ValidationError") {
+ if !ok || (awsErr.Code() != "AccessDenied" && awsErr.Code() != "ValidationError" && awsErr.Code() != "InvalidClientTokenId") {
return "", "", fmt.Errorf("Failed getting account ID via 'iam:GetUser': %s", err)
}
log.Printf("[DEBUG] Getting account ID via iam:GetUser failed: %s", err)
diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go
index 78fa93deb..327090130 100644
--- a/builtin/providers/aws/config.go
+++ b/builtin/providers/aws/config.go
@@ -171,6 +171,20 @@ func (c *AWSClient) DynamoDB() *dynamodb.DynamoDB {
return c.dynamodbconn
}
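+// IsGovCloud reports whether the configured region belongs to the AWS
+// GovCloud (US) partition, where some newer EC2 features (such as
+// launch-time volume tagging) are unavailable.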
+func (c *AWSClient) IsGovCloud() bool {
+ return c.region == "us-gov-west-1"
+}
+
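+// IsChinaCloud reports whether the configured region belongs to the AWS
+// China partition, which has the same restriction.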
+func (c *AWSClient) IsChinaCloud() bool {
+ return c.region == "cn-north-1"
+}
+
// Client configures and returns a fully initialized AWSClient
func (c *Config) Client() (interface{}, error) {
// Get the auth and region. This can fail if keys/regions were not
diff --git a/builtin/providers/aws/data_source_aws_efs_file_system.go b/builtin/providers/aws/data_source_aws_efs_file_system.go
new file mode 100644
index 000000000..014ae1353
--- /dev/null
+++ b/builtin/providers/aws/data_source_aws_efs_file_system.go
@@ -0,0 +1,113 @@
+package aws
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/efs"
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func dataSourceAwsEfsFileSystem() *schema.Resource {
+ return &schema.Resource{
+ Read: dataSourceAwsEfsFileSystemRead,
+
+ Schema: map[string]*schema.Schema{
+ "creation_token": {
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ ForceNew: true,
+ ValidateFunc: validateMaxLength(64),
+ },
+ "file_system_id": {
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ ForceNew: true,
+ },
+ "performance_mode": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "tags": tagsSchemaComputed(),
+ },
+ }
+}
+
+func dataSourceAwsEfsFileSystemRead(d *schema.ResourceData, meta interface{}) error {
+ efsconn := meta.(*AWSClient).efsconn
+
+ describeEfsOpts := &efs.DescribeFileSystemsInput{}
+
+ if v, ok := d.GetOk("creation_token"); ok {
+ describeEfsOpts.CreationToken = aws.String(v.(string))
+ }
+
+ if v, ok := d.GetOk("file_system_id"); ok {
+ describeEfsOpts.FileSystemId = aws.String(v.(string))
+ }
+
+ describeResp, err := efsconn.DescribeFileSystems(describeEfsOpts)
+ if err != nil {
+ return errwrap.Wrapf("Error retrieving EFS: {{err}}", err)
+ }
+ if len(describeResp.FileSystems) != 1 {
+ return fmt.Errorf("Search returned %d results, please revise so only one is returned", len(describeResp.FileSystems))
+ }
+
+ d.SetId(*describeResp.FileSystems[0].FileSystemId)
+
+ tags := make([]*efs.Tag, 0)
+ var marker string
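+ // DescribeTags is paginated; follow NextMarker until every page is read.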
+ for {
+ params := &efs.DescribeTagsInput{
+ FileSystemId: aws.String(d.Id()),
+ }
+ if marker != "" {
+ params.Marker = aws.String(marker)
+ }
+
+ tagsResp, err := efsconn.DescribeTags(params)
+ if err != nil {
+ return fmt.Errorf("Error retrieving EC2 tags for EFS file system (%q): %s",
+ d.Id(), err.Error())
+ }
+
+ tags = append(tags, tagsResp.Tags...)
+
+ if tagsResp.NextMarker != nil {
+ marker = *tagsResp.NextMarker
+ } else {
+ break
+ }
+ }
+
+ err = d.Set("tags", tagsToMapEFS(tags))
+ if err != nil {
+ return err
+ }
+
+ var fs *efs.FileSystemDescription
+ for _, f := range describeResp.FileSystems {
+ if d.Id() == *f.FileSystemId {
+ fs = f
+ break
+ }
+ }
+ if fs == nil {
+ log.Printf("[WARN] EFS (%s) not found, removing from state", d.Id())
+ d.SetId("")
+ return nil
+ }
+
+ d.Set("creation_token", fs.CreationToken)
+ d.Set("performance_mode", fs.PerformanceMode)
+ d.Set("file_system_id", fs.FileSystemId)
+
+ return nil
+}
diff --git a/builtin/providers/aws/data_source_aws_efs_file_system_test.go b/builtin/providers/aws/data_source_aws_efs_file_system_test.go
new file mode 100644
index 000000000..925e6afd8
--- /dev/null
+++ b/builtin/providers/aws/data_source_aws_efs_file_system_test.go
@@ -0,0 +1,71 @@
+package aws
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccDataSourceAwsEfsFileSystem(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccDataSourceAwsEfsFileSystemConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccDataSourceAwsEfsFileSystemCheck("data.aws_efs_file_system.by_creation_token"),
+ testAccDataSourceAwsEfsFileSystemCheck("data.aws_efs_file_system.by_id"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccDataSourceAwsEfsFileSystemCheck(name string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[name]
+ if !ok {
+ return fmt.Errorf("root module has no resource called %s", name)
+ }
+
+ efsRs, ok := s.RootModule().Resources["aws_efs_file_system.test"]
+ if !ok {
+ return fmt.Errorf("can't find aws_efs_file_system.test in state")
+ }
+
+ attr := rs.Primary.Attributes
+
+ if attr["creation_token"] != efsRs.Primary.Attributes["creation_token"] {
+ return fmt.Errorf(
+ "creation_token is %s; want %s",
+ attr["creation_token"],
+ efsRs.Primary.Attributes["creation_token"],
+ )
+ }
+
+ if attr["id"] != efsRs.Primary.Attributes["id"] {
+ return fmt.Errorf(
+ "file_system_id is %s; want %s",
+ attr["id"],
+ efsRs.Primary.Attributes["id"],
+ )
+ }
+
+ return nil
+ }
+}
+
+const testAccDataSourceAwsEfsFileSystemConfig = `
+resource "aws_efs_file_system" "test" {}
+
+data "aws_efs_file_system" "by_creation_token" {
+ creation_token = "${aws_efs_file_system.test.creation_token}"
+}
+
+data "aws_efs_file_system" "by_id" {
+ file_system_id = "${aws_efs_file_system.test.id}"
+}
+`
diff --git a/builtin/providers/aws/import_aws_emr_security_configuration_test.go b/builtin/providers/aws/import_aws_emr_security_configuration_test.go
new file mode 100644
index 000000000..72ddddf51
--- /dev/null
+++ b/builtin/providers/aws/import_aws_emr_security_configuration_test.go
@@ -0,0 +1,28 @@
+package aws
+
+import (
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/resource"
+)
+
+func TestAccAWSEmrSecurityConfiguration_importBasic(t *testing.T) {
+ resourceName := "aws_emr_security_configuration.foo"
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckEmrSecurityConfigurationDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccEmrSecurityConfigurationConfig,
+ },
+
+ resource.TestStep{
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go
index 6f847fb26..e6165f33e 100644
--- a/builtin/providers/aws/provider.go
+++ b/builtin/providers/aws/provider.go
@@ -178,6 +178,7 @@ func Provider() terraform.ResourceProvider {
"aws_ecs_cluster": dataSourceAwsEcsCluster(),
"aws_ecs_container_definition": dataSourceAwsEcsContainerDefinition(),
"aws_ecs_task_definition": dataSourceAwsEcsTaskDefinition(),
+ "aws_efs_file_system": dataSourceAwsEfsFileSystem(),
"aws_eip": dataSourceAwsEip(),
"aws_elb_hosted_zone_id": dataSourceAwsElbHostedZoneId(),
"aws_elb_service_account": dataSourceAwsElbServiceAccount(),
@@ -313,6 +314,7 @@ func Provider() terraform.ResourceProvider {
"aws_elb_attachment": resourceAwsElbAttachment(),
"aws_emr_cluster": resourceAwsEMRCluster(),
"aws_emr_instance_group": resourceAwsEMRInstanceGroup(),
+ "aws_emr_security_configuration": resourceAwsEMRSecurityConfiguration(),
"aws_flow_log": resourceAwsFlowLog(),
"aws_glacier_vault": resourceAwsGlacierVault(),
"aws_iam_access_key": resourceAwsIamAccessKey(),
diff --git a/builtin/providers/aws/resource_aws_db_instance.go b/builtin/providers/aws/resource_aws_db_instance.go
index 00409230d..8d4790289 100644
--- a/builtin/providers/aws/resource_aws_db_instance.go
+++ b/builtin/providers/aws/resource_aws_db_instance.go
@@ -335,6 +335,11 @@ func resourceAwsDbInstance() *schema.Resource {
ForceNew: true,
},
+ "iam_database_authentication_enabled": {
+ Type: schema.TypeBool,
+ Optional: true,
+ },
+
"tags": tagsSchema(),
},
}
@@ -634,6 +639,10 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
opts.KmsKeyId = aws.String(attr.(string))
}
+ if attr, ok := d.GetOk("iam_database_authentication_enabled"); ok {
+ opts.EnableIAMDatabaseAuthentication = aws.Bool(attr.(bool))
+ }
+
log.Printf("[DEBUG] DB Instance create configuration: %#v", opts)
var err error
err = resource.Retry(5*time.Minute, func() *resource.RetryError {
@@ -710,6 +719,7 @@ func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error {
d.Set("multi_az", v.MultiAZ)
d.Set("kms_key_id", v.KmsKeyId)
d.Set("port", v.DbInstancePort)
+ d.Set("iam_database_authentication_enabled", v.IAMDatabaseAuthenticationEnabled)
if v.DBSubnetGroup != nil {
d.Set("db_subnet_group_name", v.DBSubnetGroup.DBSubnetGroupName)
}
@@ -994,6 +1004,11 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error
requestUpdate = true
}
+ if d.HasChange("iam_database_authentication_enabled") {
+ req.EnableIAMDatabaseAuthentication = aws.Bool(d.Get("iam_database_authentication_enabled").(bool))
+ requestUpdate = true
+ }
+
log.Printf("[DEBUG] Send DB Instance Modification request: %t", requestUpdate)
if requestUpdate {
log.Printf("[DEBUG] DB Instance Modification request: %s", req)
diff --git a/builtin/providers/aws/resource_aws_db_instance_test.go b/builtin/providers/aws/resource_aws_db_instance_test.go
index 17d3bf6b8..f7ea302c3 100644
--- a/builtin/providers/aws/resource_aws_db_instance_test.go
+++ b/builtin/providers/aws/resource_aws_db_instance_test.go
@@ -170,6 +170,27 @@ func TestAccAWSDBInstance_optionGroup(t *testing.T) {
})
}
+func TestAccAWSDBInstance_iamAuth(t *testing.T) {
+ var v rds.DBInstance
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSDBInstanceDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccCheckAWSDBIAMAuth(acctest.RandInt()),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAWSDBInstanceExists("aws_db_instance.bar", &v),
+ testAccCheckAWSDBInstanceAttributes(&v),
+ resource.TestCheckResourceAttr(
+ "aws_db_instance.bar", "iam_database_authentication_enabled", "true"),
+ ),
+ },
+ },
+ })
+}
+
func TestAccAWSDBInstanceReplica(t *testing.T) {
var s, r rds.DBInstance
@@ -773,6 +794,24 @@ resource "aws_db_instance" "bar" {
}`, rName, acctest.RandInt())
}
+func testAccCheckAWSDBIAMAuth(n int) string {
+ return fmt.Sprintf(`
+resource "aws_db_instance" "bar" {
+ identifier = "foobarbaz-test-terraform-%d"
+ allocated_storage = 10
+ engine = "mysql"
+ engine_version = "5.6.34"
+ instance_class = "db.t2.micro"
+ name = "baz"
+ password = "barbarbarbar"
+ username = "foo"
+ backup_retention_period = 0
+ skip_final_snapshot = true
+ parameter_group_name = "default.mysql5.6"
+ iam_database_authentication_enabled = true
+}`, n)
+}
+
func testAccReplicaInstanceConfig(val int) string {
return fmt.Sprintf(`
resource "aws_db_instance" "bar" {
diff --git a/builtin/providers/aws/resource_aws_dynamodb_table.go b/builtin/providers/aws/resource_aws_dynamodb_table.go
index fff6775c1..155da08f9 100644
--- a/builtin/providers/aws/resource_aws_dynamodb_table.go
+++ b/builtin/providers/aws/resource_aws_dynamodb_table.go
@@ -92,6 +92,23 @@ func resourceAwsDynamoDbTable() *schema.Resource {
return hashcode.String(buf.String())
},
},
+ "ttl": {
+ Type: schema.TypeSet,
+ Optional: true,
+ MaxItems: 1,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "attribute_name": {
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "enabled": {
+ Type: schema.TypeBool,
+ Required: true,
+ },
+ },
+ },
+ },
"local_secondary_index": {
Type: schema.TypeSet,
Optional: true,
@@ -296,6 +313,7 @@ func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) er
log.Printf("[DEBUG] Adding StreamSpecifications to the table")
}
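+ // Remember whether a ttl block was configured; it can only be applied
+ // once the table has reached the ACTIVE state below.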
+ _, timeToLiveOk := d.GetOk("ttl")
_, tagsOk := d.GetOk("tags")
attemptCount := 1
@@ -326,12 +344,28 @@ func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) er
if err := d.Set("arn", tableArn); err != nil {
return err
}
+
+ // Wait until the table is active before initiating any TimeToLive changes
+ if err := waitForTableToBeActive(d.Id(), meta); err != nil {
+ log.Printf("[DEBUG] Error waiting for table to be active: %s", err)
+ return err
+ }
+
+ log.Printf("[DEBUG] Setting DynamoDB TimeToLive on arn: %s", tableArn)
+ if timeToLiveOk {
+ if err := updateTimeToLive(d, meta); err != nil {
+ log.Printf("[DEBUG] Error updating table TimeToLive: %s", err)
+ return err
+ }
+ }
+
if tagsOk {
log.Printf("[DEBUG] Setting DynamoDB Tags on arn: %s", tableArn)
if err := createTableTags(d, meta); err != nil {
return err
}
}
+
return resourceAwsDynamoDbTableRead(d, meta)
}
}
@@ -587,6 +621,13 @@ func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) er
}
+ if d.HasChange("ttl") {
+ if err := updateTimeToLive(d, meta); err != nil {
+ log.Printf("[DEBUG] Error updating table TimeToLive: %s", err)
+ return err
+ }
+ }
+
// Update tags
if err := setTagsDynamoDb(dynamodbconn, d); err != nil {
return err
@@ -595,6 +636,46 @@ func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) er
return resourceAwsDynamoDbTableRead(d, meta)
}
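+// updateTimeToLive applies the configured ttl block via UpdateTimeToLive and
+// then waits until DynamoDB reports the requested TTL status.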
+func updateTimeToLive(d *schema.ResourceData, meta interface{}) error {
+ dynamodbconn := meta.(*AWSClient).dynamodbconn
+
+ if ttl, ok := d.GetOk("ttl"); ok {
+
+ timeToLiveSet := ttl.(*schema.Set)
+
+ spec := &dynamodb.TimeToLiveSpecification{}
+
+ timeToLive := timeToLiveSet.List()[0].(map[string]interface{})
+ spec.AttributeName = aws.String(timeToLive["attribute_name"].(string))
+ spec.Enabled = aws.Bool(timeToLive["enabled"].(bool))
+
+ req := &dynamodb.UpdateTimeToLiveInput{
+ TableName: aws.String(d.Id()),
+ TimeToLiveSpecification: spec,
+ }
+
+ _, err := dynamodbconn.UpdateTimeToLive(req)
+
+ if err != nil {
+ // If ttl was not set in the .tf file before and has now been added as
+ // disabled, there is no real change, so it is safe to continue
+ if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ValidationException" && awsErr.Message() == "TimeToLive is already disabled" {
+ return nil
+ }
+ log.Printf("[DEBUG] Error updating TimeToLive on table: %s", err)
+ return err
+ }
+
+ log.Printf("[DEBUG] Updated TimeToLive on table")
+
+ if err := waitForTimeToLiveUpdateToBeCompleted(d.Id(), timeToLive["enabled"].(bool), meta); err != nil {
+ return errwrap.Wrapf("Error waiting for Dynamo DB TimeToLive to be updated: {{err}}", err)
+ }
+ }
+
+ return nil
+}
+
func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) error {
dynamodbconn := meta.(*AWSClient).dynamodbconn
log.Printf("[DEBUG] Loading data for DynamoDB table '%s'", d.Id())
@@ -711,6 +792,23 @@ func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) erro
d.Set("arn", table.TableArn)
+ timeToLiveReq := &dynamodb.DescribeTimeToLiveInput{
+ TableName: aws.String(d.Id()),
+ }
+ timeToLiveOutput, err := dynamodbconn.DescribeTimeToLive(timeToLiveReq)
+ if err != nil {
+ return err
+ }
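+ // Flatten the TTL description back into the ttl block defined in the schema.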
+ timeToLive := []interface{}{}
+ if ttlDesc := timeToLiveOutput.TimeToLiveDescription; ttlDesc != nil && ttlDesc.AttributeName != nil {
+ attribute := map[string]interface{}{
+ "attribute_name": *ttlDesc.AttributeName,
+ "enabled": *ttlDesc.TimeToLiveStatus == dynamodb.TimeToLiveStatusEnabled,
+ }
+ timeToLive = append(timeToLive, attribute)
+ }
+ d.Set("ttl", timeToLive)
+
+ log.Printf("[DEBUG] Loaded TimeToLive data for DynamoDB table '%s'", d.Id())
+
tags, err := readTableTags(d, meta)
if err != nil {
return err
@@ -910,6 +1008,39 @@ func waitForTableToBeActive(tableName string, meta interface{}) error {
}
+func waitForTimeToLiveUpdateToBeCompleted(tableName string, enabled bool, meta interface{}) error {
+ dynamodbconn := meta.(*AWSClient).dynamodbconn
+ req := &dynamodb.DescribeTimeToLiveInput{
+ TableName: aws.String(tableName),
+ }
+
+ stateMatched := false
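+ // Poll until the reported TTL status matches the requested state; note
+ // this loop has no timeout of its own.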
+ for !stateMatched {
+ result, err := dynamodbconn.DescribeTimeToLive(req)
+
+ if err != nil {
+ return err
+ }
+
+ if enabled {
+ stateMatched = *result.TimeToLiveDescription.TimeToLiveStatus == dynamodb.TimeToLiveStatusEnabled
+ } else {
+ stateMatched = *result.TimeToLiveDescription.TimeToLiveStatus == dynamodb.TimeToLiveStatusDisabled
+ }
+
+ // Wait for a few seconds, this may take a long time...
+ if !stateMatched {
+ log.Printf("[DEBUG] Sleeping for 5 seconds before checking TimeToLive state again")
+ time.Sleep(5 * time.Second)
+ }
+ }
+
+ log.Printf("[DEBUG] TimeToLive update complete")
+
+ return nil
+}
+
func createTableTags(d *schema.ResourceData, meta interface{}) error {
// DynamoDB Table has to be in the ACTIVE state in order to tag the resource
if err := waitForTableToBeActive(d.Id(), meta); err != nil {
diff --git a/builtin/providers/aws/resource_aws_dynamodb_table_test.go b/builtin/providers/aws/resource_aws_dynamodb_table_test.go
index fe2ce175f..59cebc4a1 100644
--- a/builtin/providers/aws/resource_aws_dynamodb_table_test.go
+++ b/builtin/providers/aws/resource_aws_dynamodb_table_test.go
@@ -110,6 +110,71 @@ func TestAccAWSDynamoDbTable_gsiUpdate(t *testing.T) {
})
}
+func TestAccAWSDynamoDbTable_ttl(t *testing.T) {
+ var conf dynamodb.DescribeTableOutput
+
+ rName := acctest.RandomWithPrefix("TerraformTestTable-")
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSDynamoDbTableDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccAWSDynamoDbConfigInitialState(rName),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.basic-dynamodb-table", &conf),
+ ),
+ },
+ {
+ Config: testAccAWSDynamoDbConfigAddTimeToLive(rName),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckDynamoDbTableTimeToLiveWasUpdated("aws_dynamodb_table.basic-dynamodb-table"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckDynamoDbTableTimeToLiveWasUpdated(n string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ log.Printf("[DEBUG] Checking that TimeToLive was updated on the table")
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No DynamoDB table name specified!")
+ }
+
+ conn := testAccProvider.Meta().(*AWSClient).dynamodbconn
+
+ params := &dynamodb.DescribeTimeToLiveInput{
+ TableName: aws.String(rs.Primary.ID),
+ }
+
+ resp, err := conn.DescribeTimeToLive(params)
+
+ if err != nil {
+ return fmt.Errorf("[ERROR] Problem describing time to live for table '%s': %s", rs.Primary.ID, err)
+ }
+
+ ttlDescription := resp.TimeToLiveDescription
+
+ log.Printf("[DEBUG] Checking on table %s", rs.Primary.ID)
+
+ if *ttlDescription.TimeToLiveStatus != dynamodb.TimeToLiveStatusEnabled {
+ return fmt.Errorf("TimeToLiveStatus %s, not ENABLED!", *ttlDescription.TimeToLiveStatus)
+ }
+
+ if *ttlDescription.AttributeName != "TestTTL" {
+ return fmt.Errorf("AttributeName was %s, not TestTTL!", *ttlDescription.AttributeName)
+ }
+
+ return nil
+ }
+}
+
func TestResourceAWSDynamoDbTableStreamViewType_validation(t *testing.T) {
cases := []struct {
Value string
@@ -678,3 +743,55 @@ resource "aws_dynamodb_table" "test" {
}
`, name)
}
+
+func testAccAWSDynamoDbConfigAddTimeToLive(rName string) string {
+ return fmt.Sprintf(`
+resource "aws_dynamodb_table" "basic-dynamodb-table" {
+ name = "%s"
+ read_capacity = 10
+ write_capacity = 20
+ hash_key = "TestTableHashKey"
+ range_key = "TestTableRangeKey"
+
+ attribute {
+ name = "TestTableHashKey"
+ type = "S"
+ }
+
+ attribute {
+ name = "TestTableRangeKey"
+ type = "S"
+ }
+
+ attribute {
+ name = "TestLSIRangeKey"
+ type = "N"
+ }
+
+ attribute {
+ name = "TestGSIRangeKey"
+ type = "S"
+ }
+
+ local_secondary_index {
+ name = "TestTableLSI"
+ range_key = "TestLSIRangeKey"
+ projection_type = "ALL"
+ }
+
+ ttl {
+ attribute_name = "TestTTL"
+ enabled = true
+ }
+
+ global_secondary_index {
+ name = "InitialTestTableGSI"
+ hash_key = "TestTableHashKey"
+ range_key = "TestGSIRangeKey"
+ write_capacity = 10
+ read_capacity = 10
+ projection_type = "KEYS_ONLY"
+ }
+}
+`, rName)
+}
diff --git a/builtin/providers/aws/resource_aws_ebs_volume.go b/builtin/providers/aws/resource_aws_ebs_volume.go
index 49d5281a1..1beda135e 100644
--- a/builtin/providers/aws/resource_aws_ebs_volume.go
+++ b/builtin/providers/aws/resource_aws_ebs_volume.go
@@ -179,7 +179,7 @@ func resourceAWSEbsVolumeUpdate(d *schema.ResourceData, meta interface{}) error
stateConf := &resource.StateChangeConf{
Pending: []string{"creating", "modifying"},
- Target: []string{"available"},
+ Target: []string{"available", "in-use"},
Refresh: volumeStateRefreshFunc(conn, *result.VolumeModification.VolumeId),
Timeout: 5 * time.Minute,
Delay: 10 * time.Second,
diff --git a/builtin/providers/aws/resource_aws_ebs_volume_test.go b/builtin/providers/aws/resource_aws_ebs_volume_test.go
index bb98265a2..1c62247ed 100644
--- a/builtin/providers/aws/resource_aws_ebs_volume_test.go
+++ b/builtin/providers/aws/resource_aws_ebs_volume_test.go
@@ -30,6 +30,31 @@ func TestAccAWSEBSVolume_basic(t *testing.T) {
})
}
+func TestAccAWSEBSVolume_updateAttachedEbsVolume(t *testing.T) {
+ var v ec2.Volume
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ IDRefreshName: "aws_ebs_volume.test",
+ Providers: testAccProviders,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccAwsEbsAttachedVolumeConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckVolumeExists("aws_ebs_volume.test", &v),
+ resource.TestCheckResourceAttr("aws_ebs_volume.test", "size", "10"),
+ ),
+ },
+ {
+ Config: testAccAwsEbsAttachedVolumeConfigUpdateSize,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckVolumeExists("aws_ebs_volume.test", &v),
+ resource.TestCheckResourceAttr("aws_ebs_volume.test", "size", "20"),
+ ),
+ },
+ },
+ })
+}
+
func TestAccAWSEBSVolume_updateSize(t *testing.T) {
var v ec2.Volume
resource.Test(t, resource.TestCase{
@@ -200,6 +225,124 @@ resource "aws_ebs_volume" "test" {
}
`
+const testAccAwsEbsAttachedVolumeConfig = `
+data "aws_ami" "debian_jessie_latest" {
+ most_recent = true
+
+ filter {
+ name = "name"
+ values = ["debian-jessie-*"]
+ }
+
+ filter {
+ name = "virtualization-type"
+ values = ["hvm"]
+ }
+
+ filter {
+ name = "architecture"
+ values = ["x86_64"]
+ }
+
+ filter {
+ name = "root-device-type"
+ values = ["ebs"]
+ }
+
+ owners = ["379101102735"] # Debian
+}
+
+resource "aws_instance" "test" {
+ ami = "${data.aws_ami.debian_jessie_latest.id}"
+ associate_public_ip_address = true
+ count = 1
+ instance_type = "t2.medium"
+
+ root_block_device {
+ volume_size = "10"
+ volume_type = "standard"
+ delete_on_termination = true
+ }
+
+ tags {
+ Name = "test-terraform"
+ }
+}
+
+resource "aws_ebs_volume" "test" {
+ depends_on = ["aws_instance.test"]
+ availability_zone = "${aws_instance.test.availability_zone}"
+ type = "gp2"
+ size = "10"
+}
+
+resource "aws_volume_attachment" "test" {
+ depends_on = ["aws_ebs_volume.test"]
+ device_name = "/dev/xvdg"
+ volume_id = "${aws_ebs_volume.test.id}"
+ instance_id = "${aws_instance.test.id}"
+}
+`
+
+const testAccAwsEbsAttachedVolumeConfigUpdateSize = `
+data "aws_ami" "debian_jessie_latest" {
+ most_recent = true
+
+ filter {
+ name = "name"
+ values = ["debian-jessie-*"]
+ }
+
+ filter {
+ name = "virtualization-type"
+ values = ["hvm"]
+ }
+
+ filter {
+ name = "architecture"
+ values = ["x86_64"]
+ }
+
+ filter {
+ name = "root-device-type"
+ values = ["ebs"]
+ }
+
+ owners = ["379101102735"] # Debian
+}
+
+resource "aws_instance" "test" {
+ ami = "${data.aws_ami.debian_jessie_latest.id}"
+ associate_public_ip_address = true
+ count = 1
+ instance_type = "t2.medium"
+
+ root_block_device {
+ volume_size = "10"
+ volume_type = "standard"
+ delete_on_termination = true
+ }
+
+ tags {
+ Name = "test-terraform"
+ }
+}
+
+resource "aws_ebs_volume" "test" {
+ depends_on = ["aws_instance.test"]
+ availability_zone = "${aws_instance.test.availability_zone}"
+ type = "gp2"
+ size = "20"
+}
+
+resource "aws_volume_attachment" "test" {
+ depends_on = ["aws_ebs_volume.test"]
+ device_name = "/dev/xvdg"
+ volume_id = "${aws_ebs_volume.test.id}"
+ instance_id = "${aws_instance.test.id}"
+}
+`
+
const testAccAwsEbsVolumeConfigUpdateSize = `
resource "aws_ebs_volume" "test" {
availability_zone = "us-west-2a"
diff --git a/builtin/providers/aws/resource_aws_emr_cluster.go b/builtin/providers/aws/resource_aws_emr_cluster.go
index 62b138505..ee8868aae 100644
--- a/builtin/providers/aws/resource_aws_emr_cluster.go
+++ b/builtin/providers/aws/resource_aws_emr_cluster.go
@@ -157,6 +157,11 @@ func resourceAwsEMRCluster() *schema.Resource {
ForceNew: true,
Required: true,
},
+ "security_configuration": {
+ Type: schema.TypeString,
+ ForceNew: true,
+ Optional: true,
+ },
"autoscaling_role": &schema.Schema{
Type: schema.TypeString,
ForceNew: true,
@@ -268,6 +273,10 @@ func resourceAwsEMRClusterCreate(d *schema.ResourceData, meta interface{}) error
params.AutoScalingRole = aws.String(v.(string))
}
+ if v, ok := d.GetOk("security_configuration"); ok {
+ params.SecurityConfiguration = aws.String(v.(string))
+ }
+
if instanceProfile != "" {
params.JobFlowRole = aws.String(instanceProfile)
}
@@ -361,6 +370,7 @@ func resourceAwsEMRClusterRead(d *schema.ResourceData, meta interface{}) error {
d.Set("name", cluster.Name)
d.Set("service_role", cluster.ServiceRole)
+ d.Set("security_configuration", cluster.SecurityConfiguration)
d.Set("autoscaling_role", cluster.AutoScalingRole)
d.Set("release_label", cluster.ReleaseLabel)
d.Set("log_uri", cluster.LogUri)
diff --git a/builtin/providers/aws/resource_aws_emr_cluster_test.go b/builtin/providers/aws/resource_aws_emr_cluster_test.go
index 688c86f3f..9de404d20 100644
--- a/builtin/providers/aws/resource_aws_emr_cluster_test.go
+++ b/builtin/providers/aws/resource_aws_emr_cluster_test.go
@@ -30,6 +30,22 @@ func TestAccAWSEMRCluster_basic(t *testing.T) {
})
}
+func TestAccAWSEMRCluster_security_config(t *testing.T) {
+ var cluster emr.Cluster
+ r := acctest.RandInt()
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSEmrDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccAWSEmrClusterConfig_SecurityConfiguration(r),
+ Check: testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &cluster),
+ },
+ },
+ })
+}
+
func TestAccAWSEMRCluster_bootstrap_ordering(t *testing.T) {
var cluster emr.Cluster
rName := acctest.RandomWithPrefix("tf-emr-bootstrap")
@@ -881,6 +897,356 @@ resource "aws_iam_role_policy_attachment" "emr-autoscaling-role" {
`, r, r, r, r, r, r, r, r, r, r)
}
+func testAccAWSEmrClusterConfig_SecurityConfiguration(r int) string {
+ return fmt.Sprintf(`
+provider "aws" {
+ region = "us-west-2"
+}
+
+resource "aws_emr_cluster" "tf-test-cluster" {
+ name = "emr-test-%d"
+ release_label = "emr-5.5.0"
+ applications = ["Spark"]
+
+ ec2_attributes {
+ subnet_id = "${aws_subnet.main.id}"
+ emr_managed_master_security_group = "${aws_security_group.allow_all.id}"
+ emr_managed_slave_security_group = "${aws_security_group.allow_all.id}"
+ instance_profile = "${aws_iam_instance_profile.emr_profile.arn}"
+ }
+
+ master_instance_type = "m3.xlarge"
+ core_instance_type = "m3.xlarge"
+ core_instance_count = 1
+
+ security_configuration = "${aws_emr_security_configuration.foo.name}"
+
+ tags {
+ role = "rolename"
+ dns_zone = "env_zone"
+ env = "env"
+ name = "name-env"
+ }
+
+ keep_job_flow_alive_when_no_steps = true
+ termination_protection = false
+
+ bootstrap_action {
+ path = "s3://elasticmapreduce/bootstrap-actions/run-if"
+ name = "runif"
+ args = ["instance.isMaster=true", "echo running on master node"]
+ }
+
+ configurations = "test-fixtures/emr_configurations.json"
+
+ depends_on = ["aws_main_route_table_association.a"]
+
+ service_role = "${aws_iam_role.iam_emr_default_role.arn}"
+ autoscaling_role = "${aws_iam_role.emr-autoscaling-role.arn}"
+}
+
+resource "aws_security_group" "allow_all" {
+ name = "allow_all_%d"
+ description = "Allow all inbound traffic"
+ vpc_id = "${aws_vpc.main.id}"
+
+ ingress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+
+ egress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+
+ depends_on = ["aws_subnet.main"]
+
+ lifecycle {
+ ignore_changes = ["ingress", "egress"]
+ }
+
+ tags {
+ name = "emr_test"
+ }
+}
+
+resource "aws_vpc" "main" {
+ cidr_block = "168.31.0.0/16"
+ enable_dns_hostnames = true
+
+ tags {
+ name = "emr_test_%d"
+ }
+}
+
+resource "aws_subnet" "main" {
+ vpc_id = "${aws_vpc.main.id}"
+ cidr_block = "168.31.0.0/20"
+
+ tags {
+ name = "emr_test_%d"
+ }
+}
+
+resource "aws_internet_gateway" "gw" {
+ vpc_id = "${aws_vpc.main.id}"
+}
+
+resource "aws_route_table" "r" {
+ vpc_id = "${aws_vpc.main.id}"
+
+ route {
+ cidr_block = "0.0.0.0/0"
+ gateway_id = "${aws_internet_gateway.gw.id}"
+ }
+}
+
+resource "aws_main_route_table_association" "a" {
+ vpc_id = "${aws_vpc.main.id}"
+ route_table_id = "${aws_route_table.r.id}"
+}
+
+###
+
+# IAM things
+
+###
+
+# IAM role for EMR Service
+resource "aws_iam_role" "iam_emr_default_role" {
+ name = "iam_emr_default_role_%d"
+
+  assume_role_policy = <<EOF
[diff truncated: the IAM policy documents, the remainder of this test configuration, and the start of the new file builtin/providers/aws/resource_aws_emr_security_configuration.go, up to the "name" attribute's ValidateFunc, are missing]
+ if len(value) > 10280 {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot be longer than 10280 characters", k))
+ }
+ return
+ },
+ },
+ "name_prefix": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ if len(value) > 10000 {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot be longer than 10000 characters, name is limited to 10280", k))
+ }
+ return
+ },
+ },
+
+ "configuration": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ ValidateFunc: validateJsonString,
+ },
+
+ "creation_date": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ }
+}
+
+func resourceAwsEmrSecurityConfigurationCreate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).emrconn
+
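+ // Prefer an explicit name, then a name generated from name_prefix, and
+ // finally a wholly generated unique default.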
+ var emrSCName string
+ if v, ok := d.GetOk("name"); ok {
+ emrSCName = v.(string)
+ } else {
+ if v, ok := d.GetOk("name_prefix"); ok {
+ emrSCName = resource.PrefixedUniqueId(v.(string))
+ } else {
+ emrSCName = resource.PrefixedUniqueId("tf-emr-sc-")
+ }
+ }
+
+ resp, err := conn.CreateSecurityConfiguration(&emr.CreateSecurityConfigurationInput{
+ Name: aws.String(emrSCName),
+ SecurityConfiguration: aws.String(d.Get("configuration").(string)),
+ })
+
+ if err != nil {
+ return err
+ }
+
+ d.SetId(*resp.Name)
+ return resourceAwsEmrSecurityConfigurationRead(d, meta)
+}
+
+func resourceAwsEmrSecurityConfigurationRead(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).emrconn
+
+ resp, err := conn.DescribeSecurityConfiguration(&emr.DescribeSecurityConfigurationInput{
+ Name: aws.String(d.Id()),
+ })
+ if err != nil {
+ if isAWSErr(err, "InvalidRequestException", "does not exist") {
+ log.Printf("[WARN] EMR Security Configuraiton (%s) not found, removing from state", d.Id())
+ d.SetId("")
+ return nil
+ }
+ return err
+ }
+
+ d.Set("creation_date", resp.CreationDateTime)
+ d.Set("name", resp.Name)
+ d.Set("configuration", resp.SecurityConfiguration)
+
+ return nil
+}
+
+func resourceAwsEmrSecurityConfigurationDelete(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).emrconn
+
+ _, err := conn.DeleteSecurityConfiguration(&emr.DeleteSecurityConfigurationInput{
+ Name: aws.String(d.Id()),
+ })
+ if err != nil {
+ if isAWSErr(err, "InvalidRequestException", "does not exist") {
+ d.SetId("")
+ return nil
+ }
+ return err
+ }
+ d.SetId("")
+
+ return nil
+}
diff --git a/builtin/providers/aws/resource_aws_emr_security_configuration_test.go b/builtin/providers/aws/resource_aws_emr_security_configuration_test.go
new file mode 100644
index 000000000..c17fb806f
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_emr_security_configuration_test.go
@@ -0,0 +1,111 @@
+package aws
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/emr"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSEmrSecurityConfiguration_basic(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckEmrSecurityConfigurationDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccEmrSecurityConfigurationConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckEmrSecurityConfigurationExists("aws_emr_security_configuration.foo"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckEmrSecurityConfigurationDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*AWSClient).emrconn
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_emr_security_configuration" {
+ continue
+ }
+
+ // Try to find the Security Configuration
+ resp, err := conn.DescribeSecurityConfiguration(&emr.DescribeSecurityConfigurationInput{
+ Name: aws.String(rs.Primary.ID),
+ })
+ if err == nil {
+ if resp.Name != nil && *resp.Name == rs.Primary.ID {
+ // assume this means the resource still exists
+ return fmt.Errorf("Error: EMR Security Configuration still exists: %s", *resp.Name)
+ }
+ return nil
+ }
+
+ // Verify the error is what we want
+ if isAWSErr(err, "InvalidRequestException", "does not exist") {
+ return nil
+ }
+ return err
+ }
+
+ return nil
+}
+
+func testAccCheckEmrSecurityConfigurationExists(n string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No EMR Security Configuration ID is set")
+ }
+
+ conn := testAccProvider.Meta().(*AWSClient).emrconn
+ resp, err := conn.DescribeSecurityConfiguration(&emr.DescribeSecurityConfigurationInput{
+ Name: aws.String(rs.Primary.ID),
+ })
+ if err != nil {
+ return err
+ }
+
+ if resp.Name == nil {
+ return fmt.Errorf("EMR Security Configuration had nil name which shouldn't happen")
+ }
+
+ if *resp.Name != rs.Primary.ID {
+ return fmt.Errorf("EMR Security Configuration name mismatch, got (%s), expected (%s)", *resp.Name, rs.Primary.ID)
+ }
+
+ return nil
+ }
+}
+
+const testAccEmrSecurityConfigurationConfig = `
+resource "aws_emr_security_configuration" "foo" {
+ configuration = <<EOF
[diff truncated: the security configuration JSON, the remainder of this test fixture, and the start of the builtin/providers/aws/resource_aws_instance.go hunk are missing]
- if len(tagsSpec) > 0 {
- runOpts.TagSpecifications = tagsSpec
+ if len(tagsSpec) > 0 {
+ runOpts.TagSpecifications = tagsSpec
+ }
}
// Create the instance
@@ -639,6 +647,7 @@ func resourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error {
d.Set("primary_network_interface_id", primaryNetworkInterface.NetworkInterfaceId)
d.Set("associate_public_ip_address", primaryNetworkInterface.Association != nil)
d.Set("ipv6_address_count", len(primaryNetworkInterface.Ipv6Addresses))
+ d.Set("source_dest_check", *primaryNetworkInterface.SourceDestCheck)
for _, address := range primaryNetworkInterface.Ipv6Addresses {
ipv6Addresses = append(ipv6Addresses, *address.Ipv6Address)
@@ -713,19 +722,24 @@ func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
d.Partial(true)
- if d.HasChange("tags") && !d.IsNewResource() {
- if err := setTags(conn, d); err != nil {
- return err
- } else {
- d.SetPartial("tags")
+ restricted := meta.(*AWSClient).IsGovCloud() || meta.(*AWSClient).IsChinaCloud()
+
+ if d.HasChange("tags") {
+ if !d.IsNewResource() || restricted {
+ if err := setTags(conn, d); err != nil {
+ return err
+ } else {
+ d.SetPartial("tags")
+ }
}
}
-
- if d.HasChange("volume_tags") && !d.IsNewResource() {
- if err := setVolumeTags(conn, d); err != nil {
- return err
- } else {
- d.SetPartial("volume_tags")
+ if d.HasChange("volume_tags") {
+ if !d.IsNewResource() || restricted {
+ if err := setVolumeTags(conn, d); err != nil {
+ return err
+ } else {
+ d.SetPartial("volume_tags")
+ }
}
}
diff --git a/builtin/providers/aws/resource_aws_instance_test.go b/builtin/providers/aws/resource_aws_instance_test.go
index 7c70f1cbd..f4131b09a 100644
--- a/builtin/providers/aws/resource_aws_instance_test.go
+++ b/builtin/providers/aws/resource_aws_instance_test.go
@@ -678,6 +678,25 @@ func TestAccAWSInstance_volumeTags(t *testing.T) {
})
}
+func TestAccAWSInstance_volumeTagsComputed(t *testing.T) {
+ var v ec2.Instance
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckInstanceDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccCheckInstanceConfigWithAttachedVolume,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckInstanceExists("aws_instance.foo", &v),
+ ),
+ ExpectNonEmptyPlan: false,
+ },
+ },
+ })
+}
+
func TestAccAWSInstance_instanceProfileChange(t *testing.T) {
var v ec2.Instance
rName := acctest.RandString(5)
@@ -947,6 +966,27 @@ func TestAccAWSInstance_primaryNetworkInterface(t *testing.T) {
})
}
+func TestAccAWSInstance_primaryNetworkInterfaceSourceDestCheck(t *testing.T) {
+ var instance ec2.Instance
+ var ini ec2.NetworkInterface
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckInstanceDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccInstanceConfigPrimaryNetworkInterfaceSourceDestCheck,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckInstanceExists("aws_instance.foo", &instance),
+ testAccCheckAWSENIExists("aws_network_interface.bar", &ini),
+ resource.TestCheckResourceAttr("aws_instance.foo", "source_dest_check", "false"),
+ ),
+ },
+ },
+ })
+}
+
func TestAccAWSInstance_addSecondaryInterface(t *testing.T) {
var before ec2.Instance
var after ec2.Instance
@@ -1382,6 +1422,69 @@ resource "aws_instance" "foo" {
}
`
+const testAccCheckInstanceConfigWithAttachedVolume = `
+data "aws_ami" "debian_jessie_latest" {
+ most_recent = true
+
+ filter {
+ name = "name"
+ values = ["debian-jessie-*"]
+ }
+
+ filter {
+ name = "virtualization-type"
+ values = ["hvm"]
+ }
+
+ filter {
+ name = "architecture"
+ values = ["x86_64"]
+ }
+
+ filter {
+ name = "root-device-type"
+ values = ["ebs"]
+ }
+
+ owners = ["379101102735"] # Debian
+}
+
+resource "aws_instance" "foo" {
+ ami = "${data.aws_ami.debian_jessie_latest.id}"
+ associate_public_ip_address = true
+ count = 1
+ instance_type = "t2.medium"
+
+ root_block_device {
+ volume_size = "10"
+ volume_type = "standard"
+ delete_on_termination = true
+ }
+
+ tags {
+ Name = "test-terraform"
+ }
+}
+
+resource "aws_ebs_volume" "test" {
+ depends_on = ["aws_instance.foo"]
+ availability_zone = "${aws_instance.foo.availability_zone}"
+ type = "gp2"
+ size = "10"
+
+ tags {
+ Name = "test-terraform"
+ }
+}
+
+resource "aws_volume_attachment" "test" {
+ depends_on = ["aws_ebs_volume.test"]
+ device_name = "/dev/xvdg"
+ volume_id = "${aws_ebs_volume.test.id}"
+ instance_id = "${aws_instance.foo.id}"
+}
+`
+
const testAccCheckInstanceConfigNoVolumeTags = `
resource "aws_instance" "foo" {
ami = "ami-55a7ea65"
@@ -1784,6 +1887,42 @@ resource "aws_instance" "foo" {
}
`
+const testAccInstanceConfigPrimaryNetworkInterfaceSourceDestCheck = `
+resource "aws_vpc" "foo" {
+ cidr_block = "172.16.0.0/16"
+ tags {
+ Name = "tf-instance-test"
+ }
+}
+
+resource "aws_subnet" "foo" {
+ vpc_id = "${aws_vpc.foo.id}"
+ cidr_block = "172.16.10.0/24"
+ availability_zone = "us-west-2a"
+ tags {
+ Name = "tf-instance-test"
+ }
+}
+
+resource "aws_network_interface" "bar" {
+ subnet_id = "${aws_subnet.foo.id}"
+ private_ips = ["172.16.10.100"]
+ source_dest_check = false
+ tags {
+ Name = "primary_network_interface"
+ }
+}
+
+resource "aws_instance" "foo" {
+ ami = "ami-22b9a343"
+ instance_type = "t2.micro"
+ network_interface {
+ network_interface_id = "${aws_network_interface.bar.id}"
+ device_index = 0
+ }
+}
+`
+
const testAccInstanceConfigAddSecondaryNetworkInterfaceBefore = `
resource "aws_vpc" "foo" {
cidr_block = "172.16.0.0/16"
diff --git a/builtin/providers/aws/resource_aws_kms_key.go b/builtin/providers/aws/resource_aws_kms_key.go
index 2fa8e3287..f95f76d95 100644
--- a/builtin/providers/aws/resource_aws_kms_key.go
+++ b/builtin/providers/aws/resource_aws_kms_key.go
@@ -320,19 +320,33 @@ func updateKmsKeyStatus(conn *kms.KMS, id string, shouldBeEnabled bool) error {
}
func updateKmsKeyRotationStatus(conn *kms.KMS, d *schema.ResourceData) error {
- var err error
shouldEnableRotation := d.Get("enable_key_rotation").(bool)
- if shouldEnableRotation {
- log.Printf("[DEBUG] Enabling key rotation for KMS key %q", d.Id())
- _, err = conn.EnableKeyRotation(&kms.EnableKeyRotationInput{
- KeyId: aws.String(d.Id()),
- })
- } else {
- log.Printf("[DEBUG] Disabling key rotation for KMS key %q", d.Id())
- _, err = conn.DisableKeyRotation(&kms.DisableKeyRotationInput{
- KeyId: aws.String(d.Id()),
- })
- }
+
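+ // Toggling rotation fails with DisabledException while the key's enabled
+ // state is still propagating, so retry for up to five minutes.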
+ err := resource.Retry(5*time.Minute, func() *resource.RetryError {
+ var err error
+ if shouldEnableRotation {
+ log.Printf("[DEBUG] Enabling key rotation for KMS key %q", d.Id())
+ _, err = conn.EnableKeyRotation(&kms.EnableKeyRotationInput{
+ KeyId: aws.String(d.Id()),
+ })
+ } else {
+ log.Printf("[DEBUG] Disabling key rotation for KMS key %q", d.Id())
+ _, err = conn.DisableKeyRotation(&kms.DisableKeyRotationInput{
+ KeyId: aws.String(d.Id()),
+ })
+ }
+
+ if err != nil {
+ awsErr, ok := err.(awserr.Error)
+ if ok && awsErr.Code() == "DisabledException" {
+ return resource.RetryableError(err)
+ }
+
+ return resource.NonRetryableError(err)
+ }
+
+ return nil
+ })
if err != nil {
return fmt.Errorf("Failed to set key rotation for %q to %t: %q",
diff --git a/builtin/providers/aws/resource_aws_rds_cluster.go b/builtin/providers/aws/resource_aws_rds_cluster.go
index c33129654..7b134dade 100644
--- a/builtin/providers/aws/resource_aws_rds_cluster.go
+++ b/builtin/providers/aws/resource_aws_rds_cluster.go
@@ -216,6 +216,11 @@ func resourceAwsRDSCluster() *schema.Resource {
Optional: true,
},
+ "iam_database_authentication_enabled": {
+ Type: schema.TypeBool,
+ Optional: true,
+ },
+
"tags": tagsSchema(),
},
}
@@ -428,6 +433,10 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error
createOpts.KmsKeyId = aws.String(attr.(string))
}
+ if attr, ok := d.GetOk("iam_database_authentication_enabled"); ok {
+ createOpts.EnableIAMDatabaseAuthentication = aws.Bool(attr.(bool))
+ }
+
log.Printf("[DEBUG] RDS Cluster create options: %s", createOpts)
resp, err := conn.CreateDBCluster(createOpts)
if err != nil {
@@ -520,6 +529,7 @@ func resourceAwsRDSClusterRead(d *schema.ResourceData, meta interface{}) error {
d.Set("kms_key_id", dbc.KmsKeyId)
d.Set("reader_endpoint", dbc.ReaderEndpoint)
d.Set("replication_source_identifier", dbc.ReplicationSourceIdentifier)
+ d.Set("iam_database_authentication_enabled", dbc.IAMDatabaseAuthenticationEnabled)
var vpcg []string
for _, g := range dbc.VpcSecurityGroups {
@@ -594,6 +604,11 @@ func resourceAwsRDSClusterUpdate(d *schema.ResourceData, meta interface{}) error
requestUpdate = true
}
+ if d.HasChange("iam_database_authentication_enabled") {
+ req.EnableIAMDatabaseAuthentication = aws.Bool(d.Get("iam_database_authentication_enabled").(bool))
+ requestUpdate = true
+ }
+
if requestUpdate {
_, err := conn.ModifyDBCluster(req)
if err != nil {
diff --git a/builtin/providers/aws/resource_aws_rds_cluster_test.go b/builtin/providers/aws/resource_aws_rds_cluster_test.go
index 6acb72757..0a3c1d300 100644
--- a/builtin/providers/aws/resource_aws_rds_cluster_test.go
+++ b/builtin/providers/aws/resource_aws_rds_cluster_test.go
@@ -225,6 +225,26 @@ func TestAccAWSRDSCluster_backupsUpdate(t *testing.T) {
})
}
+func TestAccAWSRDSCluster_iamAuth(t *testing.T) {
+ var v rds.DBCluster
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSClusterDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccAWSClusterConfig_iamAuth(acctest.RandInt()),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAWSClusterExists("aws_rds_cluster.default", &v),
+ resource.TestCheckResourceAttr(
+ "aws_rds_cluster.default", "iam_database_authentication_enabled", "true"),
+ ),
+ },
+ },
+ })
+}
+
func testAccCheckAWSClusterDestroy(s *terraform.State) error {
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_rds_cluster" {
@@ -550,3 +570,16 @@ resource "aws_rds_cluster" "default" {
skip_final_snapshot = true
}`, n)
}
+
+func testAccAWSClusterConfig_iamAuth(n int) string {
+ return fmt.Sprintf(`
+resource "aws_rds_cluster" "default" {
+ cluster_identifier = "tf-aurora-cluster-%d"
+ availability_zones = ["us-west-2a","us-west-2b","us-west-2c"]
+ database_name = "mydb"
+ master_username = "foo"
+ master_password = "mustbeeightcharaters"
+ iam_database_authentication_enabled = true
+ skip_final_snapshot = true
+}`, n)
+}
diff --git a/builtin/providers/aws/resource_aws_route53_record.go b/builtin/providers/aws/resource_aws_route53_record.go
index 0a80f7892..2e0d7e4c7 100644
--- a/builtin/providers/aws/resource_aws_route53_record.go
+++ b/builtin/providers/aws/resource_aws_route53_record.go
@@ -484,7 +484,7 @@ func resourceAwsRoute53RecordRead(d *schema.ResourceData, meta interface{}) erro
}
}
- err = d.Set("records", flattenResourceRecords(record.ResourceRecords))
+ err = d.Set("records", flattenResourceRecords(record.ResourceRecords, *record.Type))
if err != nil {
return fmt.Errorf("[DEBUG] Error setting records for: %s, error: %#v", d.Id(), err)
}
diff --git a/builtin/providers/aws/resource_aws_sns_topic.go b/builtin/providers/aws/resource_aws_sns_topic.go
index f3320866a..63d308518 100644
--- a/builtin/providers/aws/resource_aws_sns_topic.go
+++ b/builtin/providers/aws/resource_aws_sns_topic.go
@@ -55,9 +55,15 @@ func resourceAwsSnsTopic() *schema.Resource {
},
},
"delivery_policy": &schema.Schema{
- Type: schema.TypeString,
- Optional: true,
- ForceNew: false,
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: false,
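+ // Validate and normalize the JSON document so semantically equal
+ // policies returned by the API do not produce spurious diffs.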
+ ValidateFunc: validateJsonString,
+ DiffSuppressFunc: suppressEquivalentJsonDiffs,
+ StateFunc: func(v interface{}) string {
+ json, _ := normalizeJsonString(v)
+ return json
+ },
},
"arn": &schema.Schema{
Type: schema.TypeString,
diff --git a/builtin/providers/aws/resource_aws_sns_topic_test.go b/builtin/providers/aws/resource_aws_sns_topic_test.go
index 738614a25..c341c98b4 100644
--- a/builtin/providers/aws/resource_aws_sns_topic_test.go
+++ b/builtin/providers/aws/resource_aws_sns_topic_test.go
@@ -67,6 +67,25 @@ func TestAccAWSSNSTopic_withIAMRole(t *testing.T) {
})
}
+func TestAccAWSSNSTopic_withDeliveryPolicy(t *testing.T) {
+ expectedPolicy := `{"http":{"defaultHealthyRetryPolicy": {"minDelayTarget": 20,"maxDelayTarget": 20,"numMaxDelayRetries": 0,"numRetries": 3,"numNoDelayRetries": 0,"numMinDelayRetries": 0,"backoffFunction": "linear"},"disableSubscriptionOverrides": false}}`
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ IDRefreshName: "aws_sns_topic.test_topic",
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSSNSTopicDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAWSSNSTopicConfig_withDeliveryPolicy,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAWSSNSTopicExists("aws_sns_topic.test_topic"),
+ testAccCheckAWSNSTopicHasDeliveryPolicy("aws_sns_topic.test_topic", expectedPolicy),
+ ),
+ },
+ },
+ })
+}
+
func testAccCheckAWSNSTopicHasPolicy(n string, expectedPolicyText string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
@@ -117,6 +136,46 @@ func testAccCheckAWSNSTopicHasPolicy(n string, expectedPolicyText string) resour
}
}
+func testAccCheckAWSNSTopicHasDeliveryPolicy(n string, expectedPolicyText string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No Queue URL specified!")
+ }
+
+ conn := testAccProvider.Meta().(*AWSClient).snsconn
+
+ params := &sns.GetTopicAttributesInput{
+ TopicArn: aws.String(rs.Primary.ID),
+ }
+ resp, err := conn.GetTopicAttributes(params)
+ if err != nil {
+ return err
+ }
+
+ var actualPolicyText string
+ for k, v := range resp.Attributes {
+ if k == "DeliveryPolicy" {
+ actualPolicyText = *v
+ break
+ }
+ }
+
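+ // Reuse the provider's JSON diff-suppression helper to compare the
+ // policies semantically rather than byte-for-byte.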
+ equivalent := suppressEquivalentJsonDiffs("", actualPolicyText, expectedPolicyText, nil)
+
+ if !equivalent {
+ return fmt.Errorf("Non-equivalent delivery policy error:\n\nexpected: %s\n\n got: %s\n",
+ expectedPolicyText, actualPolicyText)
+ }
+
+ return nil
+ }
+}
+
func testAccCheckAWSSNSTopicDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).snsconn
@@ -244,3 +303,26 @@ resource "aws_sns_topic" "test_topic" {
EOF
}
`
+
+// Test for https://github.com/hashicorp/terraform/issues/14024
+const testAccAWSSNSTopicConfig_withDeliveryPolicy = `
+resource "aws_sns_topic" "test_topic" {
+ name = "test_delivery_policy"
+ delivery_policy = <<EOF
[diff truncated: the delivery policy JSON, the remainder of this fixture, and the start of the builtin/providers/azurerm/resource_arm_template_deployment.go hunk are missing]
if resp.Properties.Outputs != nil && len(*resp.Properties.Outputs) > 0 {
outputs = make(map[string]string)
for key, output := range *resp.Properties.Outputs {
+ log.Printf("[DEBUG] Processing deployment output %s", key)
outputMap := output.(map[string]interface{})
outputValue, ok := outputMap["value"]
if !ok {
- // No value
+ log.Printf("[DEBUG] No value - skipping")
+ continue
+ }
+ outputType, ok := outputMap["type"]
+ if !ok {
+ log.Printf("[DEBUG] No type - skipping")
continue
}
- outputs[key] = outputValue.(string)
+ var outputValueString string
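+ // ARM returns outputs as typed JSON values; flatten the supported
+ // primitive types to strings, since outputs is a map of strings.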
+ switch strings.ToLower(outputType.(string)) {
+ case "bool":
+ outputValueString = strconv.FormatBool(outputValue.(bool))
+
+ case "string":
+ outputValueString = outputValue.(string)
+
+ case "int":
+ outputValueString = fmt.Sprint(outputValue)
+
+ default:
+ log.Printf("[WARN] Ignoring output %s: Outputs of type %s are not currently supported in azurerm_template_deployment.",
+ key, outputType)
+ continue
+ }
+ outputs[key] = outputValueString
}
}
- d.Set("outputs", outputs)
-
- return nil
+ return d.Set("outputs", outputs)
}
func resourceArmTemplateDeploymentDelete(d *schema.ResourceData, meta interface{}) error {
diff --git a/builtin/providers/azurerm/resource_arm_template_deployment_test.go b/builtin/providers/azurerm/resource_arm_template_deployment_test.go
index d69716d8b..40f0bea40 100644
--- a/builtin/providers/azurerm/resource_arm_template_deployment_test.go
+++ b/builtin/providers/azurerm/resource_arm_template_deployment_test.go
@@ -68,6 +68,29 @@ func TestAccAzureRMTemplateDeployment_withParams(t *testing.T) {
})
}
+func TestAccAzureRMTemplateDeployment_withOutputs(t *testing.T) {
+ ri := acctest.RandInt()
+ config := fmt.Sprintf(testAccAzureRMTemplateDeployment_withOutputs, ri, ri, ri)
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMTemplateDeploymentDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMTemplateDeploymentExists("azurerm_template_deployment.test"),
+ resource.TestCheckOutput("tfIntOutput", "-123"),
+ resource.TestCheckOutput("tfStringOutput", "Standard_GRS"),
+ resource.TestCheckOutput("tfFalseOutput", "false"),
+ resource.TestCheckOutput("tfTrueOutput", "true"),
+ resource.TestCheckResourceAttr("azurerm_template_deployment.test", "outputs.stringOutput", "Standard_GRS"),
+ ),
+ },
+ },
+ })
+}
+
func TestAccAzureRMTemplateDeployment_withError(t *testing.T) {
ri := acctest.RandInt()
config := fmt.Sprintf(testAccAzureRMTemplateDeployment_withError, ri, ri)
@@ -352,6 +375,126 @@ DEPLOY
`
+var testAccAzureRMTemplateDeployment_withOutputs = `
+ resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "West US"
+ }
+
+ output "tfStringOutput" {
+ value = "${azurerm_template_deployment.test.outputs.stringOutput}"
+ }
+
+ output "tfIntOutput" {
+ value = "${azurerm_template_deployment.test.outputs.intOutput}"
+ }
+
+ output "tfFalseOutput" {
+ value = "${azurerm_template_deployment.test.outputs.falseOutput}"
+ }
+
+ output "tfTrueOutput" {
+ value = "${azurerm_template_deployment.test.outputs.trueOutput}"
+ }
+
+ resource "azurerm_template_deployment" "test" {
+ name = "acctesttemplate-%d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ template_body = <<DEPLOY
+{
+  "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
+  "contentVersion": "1.0.0.0",
+  "parameters": {
+    "storageAccountType": {
+      "type": "string",
+      "defaultValue": "Standard_GRS"
+    }
+  },
+  "variables": {
+    "location": "[resourceGroup().location]",
+    "storageAccountName": "acctestsa%d"
+  },
+  "resources": [
+    {
+      "type": "Microsoft.Storage/storageAccounts",
+      "name": "[variables('storageAccountName')]",
+      "apiVersion": "2015-06-15",
+      "location": "[variables('location')]",
+      "properties": {
+        "accountType": "[parameters('storageAccountType')]"
+      }
+    }
+  ],
+  "outputs": {
+    "stringOutput": {
+      "type": "string",
+      "value": "[parameters('storageAccountType')]"
+    },
+    "intOutput": {
+      "type": "int",
+      "value": -123
+    },
+    "falseOutput": {
+      "type": "bool",
+      "value": false
+    },
+    "trueOutput": {
+      "type": "bool",
+      "value": true
+    }
+  }
+}
+DEPLOY
+
+ deployment_mode = "Incremental"
+ }
+`
diff --git a/builtin/providers/google/resource_compute_project_metadata_test.go b/builtin/providers/google/resource_compute_project_metadata_test.go
--- a/builtin/providers/google/resource_compute_project_metadata_test.go
+++ b/builtin/providers/google/resource_compute_project_metadata_test.go
func testAccCheckComputeProjectMetadataDestroy(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
- project, err := config.clientCompute.Projects.Get(config.Project).Do()
- if err == nil && len(project.CommonInstanceMetadata.Items) > 0 {
- return fmt.Errorf("Error, metadata items still exist")
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "google_compute_project_metadata" {
+ continue
+ }
+
+ project, err := config.clientCompute.Projects.Get(rs.Primary.ID).Do()
+ if err == nil && len(project.CommonInstanceMetadata.Items) > 0 {
+ return fmt.Errorf("Error, metadata items still exist in %s", rs.Primary.ID)
+ }
}
return nil
}
-func testAccCheckComputeProjectExists(n string, project *compute.Project) resource.TestCheckFunc {
+func testAccCheckComputeProjectExists(n, projectID string, project *compute.Project) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
@@ -126,8 +161,7 @@ func testAccCheckComputeProjectExists(n string, project *compute.Project) resour
config := testAccProvider.Meta().(*Config)
- found, err := config.clientCompute.Projects.Get(
- config.Project).Do()
+ found, err := config.clientCompute.Projects.Get(projectID).Do()
if err != nil {
return err
}
@@ -142,10 +176,10 @@ func testAccCheckComputeProjectExists(n string, project *compute.Project) resour
}
}
-func testAccCheckComputeProjectMetadataContains(project *compute.Project, key string, value string) resource.TestCheckFunc {
+func testAccCheckComputeProjectMetadataContains(projectID, key, value string) resource.TestCheckFunc {
return func(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
- project, err := config.clientCompute.Projects.Get(config.Project).Do()
+ project, err := config.clientCompute.Projects.Get(projectID).Do()
if err != nil {
return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err)
}
@@ -161,14 +195,14 @@ func testAccCheckComputeProjectMetadataContains(project *compute.Project, key st
}
}
- return fmt.Errorf("Error, key %s not present", key)
+ return fmt.Errorf("Error, key %s not present in %s", key, project.SelfLink)
}
}
-func testAccCheckComputeProjectMetadataSize(project *compute.Project, size int) resource.TestCheckFunc {
+func testAccCheckComputeProjectMetadataSize(projectID string, size int) resource.TestCheckFunc {
return func(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
- project, err := config.clientCompute.Projects.Get(config.Project).Do()
+ project, err := config.clientCompute.Projects.Get(projectID).Do()
if err != nil {
return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err)
}
@@ -182,36 +216,100 @@ func testAccCheckComputeProjectMetadataSize(project *compute.Project, size int)
}
}
-const testAccComputeProject_basic0_metadata = `
-resource "google_compute_project_metadata" "fizzbuzz" {
- metadata {
- banana = "orange"
- sofa = "darwinism"
- }
-}`
+func testAccComputeProject_basic0_metadata(projectID, name, org, billing string) string {
+ return fmt.Sprintf(`
+resource "google_project" "project" {
+ project_id = "%s"
+ name = "%s"
+ org_id = "%s"
+ billing_account = "%s"
+}
-const testAccComputeProject_basic1_metadata = `
-resource "google_compute_project_metadata" "fizzbuzz" {
- metadata {
- kiwi = "papaya"
- finches = "darwinism"
- }
-}`
+resource "google_project_services" "services" {
+ project = "${google_project.project.project_id}"
+ services = ["compute-component.googleapis.com"]
+}
-const testAccComputeProject_modify0_metadata = `
resource "google_compute_project_metadata" "fizzbuzz" {
- metadata {
- paper = "pen"
- genghis_khan = "french bread"
- happy = "smiling"
- }
-}`
+ project = "${google_project.project.project_id}"
+ metadata {
+ banana = "orange"
+ sofa = "darwinism"
+ }
+ depends_on = ["google_project_services.services"]
+}`, projectID, name, org, billing)
+}
+
+func testAccComputeProject_basic1_metadata(projectID, name, org, billing string) string {
+ return fmt.Sprintf(`
+resource "google_project" "project" {
+ project_id = "%s"
+ name = "%s"
+ org_id = "%s"
+ billing_account = "%s"
+}
+
+resource "google_project_services" "services" {
+ project = "${google_project.project.project_id}"
+ services = ["compute-component.googleapis.com"]
+}
-const testAccComputeProject_modify1_metadata = `
resource "google_compute_project_metadata" "fizzbuzz" {
- metadata {
- paper = "pen"
- paris = "french bread"
- happy = "laughing"
- }
-}`
+ project = "${google_project.project.project_id}"
+ metadata {
+ kiwi = "papaya"
+ finches = "darwinism"
+ }
+ depends_on = ["google_project_services.services"]
+}`, projectID, name, org, billing)
+}
+
+func testAccComputeProject_modify0_metadata(projectID, name, org, billing string) string {
+ return fmt.Sprintf(`
+resource "google_project" "project" {
+ project_id = "%s"
+ name = "%s"
+ org_id = "%s"
+ billing_account = "%s"
+}
+
+resource "google_project_services" "services" {
+ project = "${google_project.project.project_id}"
+ services = ["compute-component.googleapis.com"]
+}
+
+resource "google_compute_project_metadata" "fizzbuzz" {
+ project = "${google_project.project.project_id}"
+ metadata {
+ paper = "pen"
+ genghis_khan = "french bread"
+ happy = "smiling"
+ }
+ depends_on = ["google_project_services.services"]
+}`, projectID, name, org, billing)
+}
+
+func testAccComputeProject_modify1_metadata(projectID, name, org, billing string) string {
+ return fmt.Sprintf(`
+resource "google_project" "project" {
+ project_id = "%s"
+ name = "%s"
+ org_id = "%s"
+ billing_account = "%s"
+}
+
+resource "google_project_services" "services" {
+ project = "${google_project.project.project_id}"
+ services = ["compute-component.googleapis.com"]
+}
+
+resource "google_compute_project_metadata" "fizzbuzz" {
+ project = "${google_project.project.project_id}"
+ metadata {
+ paper = "pen"
+ paris = "french bread"
+ happy = "laughing"
+ }
+ depends_on = ["google_project_services.services"]
+}`, projectID, name, org, billing)
+}
diff --git a/builtin/providers/google/resource_compute_snapshot.go b/builtin/providers/google/resource_compute_snapshot.go
new file mode 100644
index 000000000..e482c86f9
--- /dev/null
+++ b/builtin/providers/google/resource_compute_snapshot.go
@@ -0,0 +1,210 @@
+package google
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/terraform/helper/schema"
+ "google.golang.org/api/compute/v1"
+ "google.golang.org/api/googleapi"
+)
+
+func resourceComputeSnapshot() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceComputeSnapshotCreate,
+ Read: resourceComputeSnapshotRead,
+ Delete: resourceComputeSnapshotDelete,
+ Exists: resourceComputeSnapshotExists,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "zone": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "snapshot_encryption_key_raw": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ Sensitive: true,
+ },
+
+ "snapshot_encryption_key_sha256": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
+ "source_disk_encryption_key_raw": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ Sensitive: true,
+ },
+
+ "source_disk_encryption_key_sha256": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
+ "source_disk": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "source_disk_link": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
+ "project": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "self_link": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ }
+}
+
+func resourceComputeSnapshotCreate(d *schema.ResourceData, meta interface{}) error {
+ config := meta.(*Config)
+
+ project, err := getProject(d, config)
+ if err != nil {
+ return err
+ }
+
+ // Build the snapshot parameter
+ snapshot := &compute.Snapshot{
+ Name: d.Get("name").(string),
+ }
+
+ source_disk := d.Get("source_disk").(string)
+
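+ // Customer-supplied encryption keys are optional; when provided, the raw
+ // keys are sent with the request and only their SHA256 fingerprints are
+ // exposed back as computed attributes.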
+ if v, ok := d.GetOk("snapshot_encryption_key_raw"); ok {
+ snapshot.SnapshotEncryptionKey = &compute.CustomerEncryptionKey{}
+ snapshot.SnapshotEncryptionKey.RawKey = v.(string)
+ }
+
+ if v, ok := d.GetOk("source_disk_encryption_key_raw"); ok {
+ snapshot.SourceDiskEncryptionKey = &compute.CustomerEncryptionKey{}
+ snapshot.SourceDiskEncryptionKey.RawKey = v.(string)
+ }
+
+ op, err := config.clientCompute.Disks.CreateSnapshot(
+ project, d.Get("zone").(string), source_disk, snapshot).Do()
+ if err != nil {
+ return fmt.Errorf("Error creating snapshot: %s", err)
+ }
+
+ // The create request was accepted, so store the ID now
+ d.SetId(snapshot.Name)
+
+ err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating Snapshot")
+ if err != nil {
+ return err
+ }
+ return resourceComputeSnapshotRead(d, meta)
+}
+
+func resourceComputeSnapshotRead(d *schema.ResourceData, meta interface{}) error {
+ config := meta.(*Config)
+
+ project, err := getProject(d, config)
+ if err != nil {
+ return err
+ }
+
+ snapshot, err := config.clientCompute.Snapshots.Get(
+ project, d.Id()).Do()
+ if err != nil {
+ if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Snapshot %q because it's gone", d.Get("name").(string))
+ // The resource doesn't exist anymore
+ d.SetId("")
+
+ return nil
+ }
+
+ return fmt.Errorf("Error reading snapshot: %s", err)
+ }
+
+ d.Set("self_link", snapshot.SelfLink)
+ d.Set("source_disk_link", snapshot.SourceDisk)
+ d.Set("name", snapshot.Name)
+
+ if snapshot.SnapshotEncryptionKey != nil && snapshot.SnapshotEncryptionKey.Sha256 != "" {
+ d.Set("snapshot_encryption_key_sha256", snapshot.SnapshotEncryptionKey.Sha256)
+ }
+
+ if snapshot.SourceDiskEncryptionKey != nil && snapshot.SourceDiskEncryptionKey.Sha256 != "" {
+ d.Set("source_disk_encryption_key_sha256", snapshot.SourceDiskEncryptionKey.Sha256)
+ }
+
+ return nil
+}
+
+func resourceComputeSnapshotDelete(d *schema.ResourceData, meta interface{}) error {
+ config := meta.(*Config)
+
+ project, err := getProject(d, config)
+ if err != nil {
+ return err
+ }
+
+ // Delete the snapshot
+ op, err := config.clientCompute.Snapshots.Delete(
+ project, d.Id()).Do()
+ if err != nil {
+ if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Snapshot %q because it's gone", d.Get("name").(string))
+ // The resource doesn't exist anymore
+ d.SetId("")
+ return nil
+ }
+ return fmt.Errorf("Error deleting snapshot: %s", err)
+ }
+
+ err = computeOperationWaitGlobal(config, op, project, "Deleting Snapshot")
+ if err != nil {
+ return err
+ }
+
+ d.SetId("")
+ return nil
+}
+
+func resourceComputeSnapshotExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ config := meta.(*Config)
+
+ project, err := getProject(d, config)
+ if err != nil {
+ return false, err
+ }
+
+ _, err = config.clientCompute.Snapshots.Get(
+ project, d.Id()).Do()
+ if err != nil {
+ if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Snapshot %q because it's gone", d.Get("name").(string))
+ // The resource doesn't exist anymore
+ d.SetId("")
+
+ return false, nil
+ }
+ return true, err
+ }
+ return true, nil
+}
diff --git a/builtin/providers/google/resource_compute_snapshot_test.go b/builtin/providers/google/resource_compute_snapshot_test.go
new file mode 100644
index 000000000..2a29f940d
--- /dev/null
+++ b/builtin/providers/google/resource_compute_snapshot_test.go
@@ -0,0 +1,183 @@
+package google
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/acctest"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+ "google.golang.org/api/compute/v1"
+ "google.golang.org/api/googleapi"
+)
+
+func TestAccComputeSnapshot_basic(t *testing.T) {
+ snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
+ var snapshot compute.Snapshot
+ diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckComputeSnapshotDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccComputeSnapshot_basic(snapshotName, diskName),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckComputeSnapshotExists(
+ "google_compute_snapshot.foobar", &snapshot),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccComputeSnapshot_encryption(t *testing.T) {
+ snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
+ diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
+ var snapshot compute.Snapshot
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckComputeSnapshotDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccComputeSnapshot_encryption(snapshotName, diskName),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckComputeSnapshotExists(
+ "google_compute_snapshot.foobar", &snapshot),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckComputeSnapshotDestroy(s *terraform.State) error {
+ config := testAccProvider.Meta().(*Config)
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "google_compute_snapshot" {
+ continue
+ }
+
+ _, err := config.clientCompute.Snapshots.Get(
+ config.Project, rs.Primary.ID).Do()
+ if err != nil {
+ if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ return nil
+ } else if ok {
+ return fmt.Errorf("Error while requesting Google Cloud Plateform: http code error : %d, http message error: %s", gerr.Code, gerr.Message)
+ }
+ return fmt.Errorf("Error while requesting Google Cloud Plateform")
+ }
+ return fmt.Errorf("Snapshot still exists")
+ }
+
+ return nil
+}
+
+func testAccCheckComputeSnapshotExists(n string, snapshot *compute.Snapshot) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No ID is set")
+ }
+
+ config := testAccProvider.Meta().(*Config)
+
+ found, err := config.clientCompute.Snapshots.Get(
+ config.Project, rs.Primary.ID).Do()
+ if err != nil {
+ return err
+ }
+
+ if found.Name != rs.Primary.ID {
+ return fmt.Errorf("Snapshot %s not found", n)
+ }
+
+ attr := rs.Primary.Attributes["snapshot_encryption_key_sha256"]
+ if found.SnapshotEncryptionKey != nil && found.SnapshotEncryptionKey.Sha256 != attr {
+ return fmt.Errorf("Snapshot %s has mismatched encryption key (Sha256).\nTF State: %+v.\nGCP State: %+v",
+ n, attr, found.SnapshotEncryptionKey.Sha256)
+ } else if found.SnapshotEncryptionKey == nil && attr != "" {
+ return fmt.Errorf("Snapshot %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v",
+ n, attr, found.SnapshotEncryptionKey)
+ }
+
+ attr = rs.Primary.Attributes["source_disk_encryption_key_sha256"]
+ if found.SourceDiskEncryptionKey != nil && found.SourceDiskEncryptionKey.Sha256 != attr {
+ return fmt.Errorf("Snapshot %s has mismatched source disk encryption key (Sha256).\nTF State: %+v.\nGCP State: %+v",
+ n, attr, found.SourceDiskEncryptionKey.Sha256)
+ } else if found.SourceDiskEncryptionKey == nil && attr != "" {
+ return fmt.Errorf("Snapshot %s has mismatched source disk encryption key.\nTF State: %+v.\nGCP State: %+v",
+ n, attr, found.SourceDiskEncryptionKey)
+ }
+
+ attr = rs.Primary.Attributes["source_disk_link"]
+ if found.SourceDisk != attr {
+ return fmt.Errorf("Snapshot %s has mismatched source disk link.\nTF State: %+v.\nGCP State: %+v",
+ n, attr, found.SourceDisk)
+ }
+
+ foundDisk, errDisk := config.clientCompute.Disks.Get(
+ config.Project, rs.Primary.Attributes["zone"], rs.Primary.Attributes["source_disk"]).Do()
+ if errDisk != nil {
+ return errDisk
+ }
+ if foundDisk.SelfLink != attr {
+ return fmt.Errorf("Snapshot %s has mismatched source disk\nTF State: %+v.\nGCP State: %+v",
+ n, attr, foundDisk.SelfLink)
+ }
+
+ attr = rs.Primary.Attributes["self_link"]
+ if found.SelfLink != attr {
+ return fmt.Errorf("Snapshot %s has mismatched self link.\nTF State: %+v.\nGCP State: %+v",
+ n, attr, found.SelfLink)
+ }
+
+ *snapshot = *found
+
+ return nil
+ }
+}
+
+func testAccComputeSnapshot_basic(snapshotName string, diskName string) string {
+ return fmt.Sprintf(`
+resource "google_compute_disk" "foobar" {
+ name = "%s"
+ image = "debian-8-jessie-v20160921"
+ size = 10
+ type = "pd-ssd"
+ zone = "us-central1-a"
+}
+
+resource "google_compute_snapshot" "foobar" {
+ name = "%s"
+ source_disk = "${google_compute_disk.foobar.name}"
+ zone = "us-central1-a"
+}`, diskName, snapshotName)
+}
+
+func testAccComputeSnapshot_encryption(snapshotName string, diskName string) string {
+ return fmt.Sprintf(`
+resource "google_compute_disk" "foobar" {
+ name = "%s"
+ image = "debian-8-jessie-v20160921"
+ size = 10
+ type = "pd-ssd"
+ zone = "us-central1-a"
+ disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
+}
+resource "google_compute_snapshot" "foobar" {
+ name = "%s"
+ source_disk = "${google_compute_disk.foobar.name}"
+ zone = "us-central1-a"
+ source_disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
+ snapshot_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
+}`, diskName, snapshotName)
+}
diff --git a/builtin/providers/google/resource_container_cluster_test.go b/builtin/providers/google/resource_container_cluster_test.go
index 1c26dfefe..236785e43 100644
--- a/builtin/providers/google/resource_container_cluster_test.go
+++ b/builtin/providers/google/resource_container_cluster_test.go
@@ -403,7 +403,7 @@ var testAccContainerCluster_withVersion = fmt.Sprintf(`
resource "google_container_cluster" "with_version" {
name = "cluster-test-%s"
zone = "us-central1-a"
- node_version = "1.6.0"
+ node_version = "1.6.1"
initial_node_count = 1
master_auth {
diff --git a/builtin/providers/google/resource_dns_managed_zone.go b/builtin/providers/google/resource_dns_managed_zone.go
index 8181e278b..f35e7dd87 100644
--- a/builtin/providers/google/resource_dns_managed_zone.go
+++ b/builtin/providers/google/resource_dns_managed_zone.go
@@ -14,7 +14,9 @@ func resourceDnsManagedZone() *schema.Resource {
Create: resourceDnsManagedZoneCreate,
Read: resourceDnsManagedZoneRead,
Delete: resourceDnsManagedZoneDelete,
-
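+ // Managed zones are imported by name; passthrough uses the supplied
+ // import ID directly as the resource ID.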
+ Importer: &schema.ResourceImporter{
+ State: schema.ImportStatePassthrough,
+ },
Schema: map[string]*schema.Schema{
"dns_name": &schema.Schema{
Type: schema.TypeString,
@@ -109,6 +111,9 @@ func resourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) error
}
d.Set("name_servers", zone.NameServers)
+ d.Set("name", zone.Name)
+ d.Set("dns_name", zone.DnsName)
+ d.Set("description", zone.Description)
return nil
}
diff --git a/builtin/providers/google/resource_google_project_services.go b/builtin/providers/google/resource_google_project_services.go
index 84bcd95ad..3a9c66730 100644
--- a/builtin/providers/google/resource_google_project_services.go
+++ b/builtin/providers/google/resource_google_project_services.go
@@ -31,6 +31,14 @@ func resourceGoogleProjectServices() *schema.Resource {
}
}
+// These services can only be enabled as a side-effect of enabling other services,
+// so don't bother storing them in the config or using them for diffing.
+var ignore = map[string]struct{}{
+ "containeranalysis.googleapis.com": struct{}{},
+ "dataproc-control.googleapis.com": struct{}{},
+ "source.googleapis.com": struct{}{},
+}
+
func resourceGoogleProjectServicesCreate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
pid := d.Get("project").(string)
@@ -155,12 +163,19 @@ func getConfigServices(d *schema.ResourceData) (services []string) {
func getApiServices(pid string, config *Config) ([]string, error) {
apiServices := make([]string, 0)
// Get services from the API
- svcResp, err := config.clientServiceMan.Services.List().ConsumerId("project:" + pid).Do()
- if err != nil {
- return apiServices, err
- }
- for _, v := range svcResp.Services {
- apiServices = append(apiServices, v.ServiceName)
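+ // The Service Management list call is paginated; keep following
+ // NextPageToken until every page has been consumed.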
+ token := ""
+ for paginate := true; paginate; {
+ svcResp, err := config.clientServiceMan.Services.List().ConsumerId("project:" + pid).PageToken(token).Do()
+ if err != nil {
+ return apiServices, err
+ }
+ for _, v := range svcResp.Services {
+ if _, ok := ignore[v.ServiceName]; !ok {
+ apiServices = append(apiServices, v.ServiceName)
+ }
+ }
+ token = svcResp.NextPageToken
+ paginate = token != ""
}
return apiServices, nil
}
diff --git a/builtin/providers/google/resource_google_project_services_test.go b/builtin/providers/google/resource_google_project_services_test.go
index dff073b28..e8af051cd 100644
--- a/builtin/providers/google/resource_google_project_services_test.go
+++ b/builtin/providers/google/resource_google_project_services_test.go
@@ -4,6 +4,7 @@ import (
"bytes"
"fmt"
"log"
+ "os"
"reflect"
"sort"
"testing"
@@ -123,6 +124,103 @@ func TestAccGoogleProjectServices_authoritative2(t *testing.T) {
})
}
+// Test that services that can't be enabled on their own (such as dataproc-control.googleapis.com)
+// don't end up causing diffs when they are enabled as a side-effect of a different service's
+// enablement.
+func TestAccGoogleProjectServices_ignoreUnenablableServices(t *testing.T) {
+ skipIfEnvNotSet(t,
+ []string{
+ "GOOGLE_ORG",
+ "GOOGLE_BILLING_ACCOUNT",
+ }...,
+ )
+
+ billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
+ pid := "terraform-" + acctest.RandString(10)
+ services := []string{
+ "dataproc.googleapis.com",
+ // The following services are enabled as a side-effect of dataproc's enablement
+ "storage-component.googleapis.com",
+ "deploymentmanager.googleapis.com",
+ "replicapool.googleapis.com",
+ "replicapoolupdater.googleapis.com",
+ "resourceviews.googleapis.com",
+ "compute-component.googleapis.com",
+ "container.googleapis.com",
+ "containerregistry.googleapis.com",
+ "storage-api.googleapis.com",
+ "pubsub.googleapis.com",
+ }
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccGoogleProjectAssociateServicesBasic_withBilling(services, pid, pname, org, billingId),
+ Check: resource.ComposeTestCheckFunc(
+ testProjectServicesMatch(services, pid),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccGoogleProjectServices_manyServices(t *testing.T) {
+ skipIfEnvNotSet(t,
+ []string{
+ "GOOGLE_ORG",
+ "GOOGLE_BILLING_ACCOUNT",
+ }...,
+ )
+
+ billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
+ pid := "terraform-" + acctest.RandString(10)
+ services := []string{
+ "bigquery-json.googleapis.com",
+ "cloudbuild.googleapis.com",
+ "cloudfunctions.googleapis.com",
+ "cloudresourcemanager.googleapis.com",
+ "cloudtrace.googleapis.com",
+ "compute-component.googleapis.com",
+ "container.googleapis.com",
+ "containerregistry.googleapis.com",
+ "dataflow.googleapis.com",
+ "dataproc.googleapis.com",
+ "deploymentmanager.googleapis.com",
+ "dns.googleapis.com",
+ "endpoints.googleapis.com",
+ "iam.googleapis.com",
+ "logging.googleapis.com",
+ "ml.googleapis.com",
+ "monitoring.googleapis.com",
+ "pubsub.googleapis.com",
+ "replicapool.googleapis.com",
+ "replicapoolupdater.googleapis.com",
+ "resourceviews.googleapis.com",
+ "runtimeconfig.googleapis.com",
+ "servicecontrol.googleapis.com",
+ "servicemanagement.googleapis.com",
+ "sourcerepo.googleapis.com",
+ "spanner.googleapis.com",
+ "storage-api.googleapis.com",
+ "storage-component.googleapis.com",
+ }
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccGoogleProjectAssociateServicesBasic_withBilling(services, pid, pname, org, billingId),
+ Check: resource.ComposeTestCheckFunc(
+ testProjectServicesMatch(services, pid),
+ ),
+ },
+ },
+ })
+}
+
func testAccGoogleProjectAssociateServicesBasic(services []string, pid, name, org string) string {
return fmt.Sprintf(`
resource "google_project" "acceptance" {
@@ -137,6 +235,21 @@ resource "google_project_services" "acceptance" {
`, pid, name, org, testStringsToString(services))
}
+func testAccGoogleProjectAssociateServicesBasic_withBilling(services []string, pid, name, org, billing string) string {
+ return fmt.Sprintf(`
+resource "google_project" "acceptance" {
+ project_id = "%s"
+ name = "%s"
+ org_id = "%s"
+ billing_account = "%s"
+}
+resource "google_project_services" "acceptance" {
+ project = "${google_project.acceptance.project_id}"
+ services = [%s]
+}
+`, pid, name, org, billing, testStringsToString(services))
+}
+
func testProjectServicesMatch(services []string, pid string) resource.TestCheckFunc {
return func(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
diff --git a/builtin/providers/heroku/provider.go b/builtin/providers/heroku/provider.go
index 6a8c9b986..fec57ca59 100644
--- a/builtin/providers/heroku/provider.go
+++ b/builtin/providers/heroku/provider.go
@@ -1,7 +1,9 @@
package heroku
import (
+ "fmt"
"log"
+ "strings"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
@@ -25,12 +27,15 @@ func Provider() terraform.ResourceProvider {
},
ResourcesMap: map[string]*schema.Resource{
- "heroku_addon": resourceHerokuAddon(),
- "heroku_app": resourceHerokuApp(),
- "heroku_cert": resourceHerokuCert(),
- "heroku_domain": resourceHerokuDomain(),
- "heroku_drain": resourceHerokuDrain(),
- "heroku_space": resourceHerokuSpace(),
+ "heroku_addon": resourceHerokuAddon(),
+ "heroku_app": resourceHerokuApp(),
+ "heroku_app_feature": resourceHerokuAppFeature(),
+ "heroku_cert": resourceHerokuCert(),
+ "heroku_domain": resourceHerokuDomain(),
+ "heroku_drain": resourceHerokuDrain(),
+ "heroku_pipeline": resourceHerokuPipeline(),
+ "heroku_pipeline_coupling": resourceHerokuPipelineCoupling(),
+ "heroku_space": resourceHerokuSpace(),
},
ConfigureFunc: providerConfigure,
@@ -46,3 +51,12 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
log.Println("[INFO] Initializing Heroku client")
return config.Client()
}
+
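+// buildCompositeID and parseCompositeID handle resources whose natural
+// identifier is scoped to a parent, e.g. an app feature stored as
+// "my-app:feature-id" and split back into ("my-app", "feature-id") on read.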
+func buildCompositeID(a, b string) string {
+ return fmt.Sprintf("%s:%s", a, b)
+}
+
+func parseCompositeID(id string) (string, string) {
+ parts := strings.SplitN(id, ":", 2)
+ return parts[0], parts[1]
+}
diff --git a/builtin/providers/heroku/resource_heroku_app.go b/builtin/providers/heroku/resource_heroku_app.go
index 93efa6ada..820296325 100644
--- a/builtin/providers/heroku/resource_heroku_app.go
+++ b/builtin/providers/heroku/resource_heroku_app.go
@@ -232,15 +232,8 @@ func resourceHerokuAppCreate(d *schema.ResourceData, meta interface{}) error {
d.SetId(a.Name)
log.Printf("[INFO] App ID: %s", d.Id())
- if v, ok := d.GetOk("config_vars"); ok {
- err = updateConfigVars(d.Id(), client, nil, v.([]interface{}))
- if err != nil {
- return err
- }
- }
-
- if v, ok := d.GetOk("buildpacks"); ok {
- err = updateBuildpacks(d.Id(), client, v.([]interface{}))
+ if err := performAppPostCreateTasks(d, client); err != nil {
+ return err
}
return resourceHerokuAppRead(d, meta)
@@ -305,11 +298,8 @@ func resourceHerokuOrgAppCreate(d *schema.ResourceData, meta interface{}) error
d.SetId(a.Name)
log.Printf("[INFO] App ID: %s", d.Id())
- if v, ok := d.GetOk("config_vars"); ok {
- err = updateConfigVars(d.Id(), client, nil, v.([]interface{}))
- if err != nil {
- return err
- }
+ if err := performAppPostCreateTasks(d, client); err != nil {
+ return err
}
return resourceHerokuAppRead(d, meta)
@@ -534,3 +524,20 @@ func updateBuildpacks(id string, client *heroku.Service, v []interface{}) error
return nil
}
+
+// performAppPostCreateTasks performs post-create tasks common to both org and non-org apps.
+func performAppPostCreateTasks(d *schema.ResourceData, client *heroku.Service) error {
+ if v, ok := d.GetOk("config_vars"); ok {
+ if err := updateConfigVars(d.Id(), client, nil, v.([]interface{})); err != nil {
+ return err
+ }
+ }
+
+ if v, ok := d.GetOk("buildpacks"); ok {
+ if err := updateBuildpacks(d.Id(), client, v.([]interface{})); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/builtin/providers/heroku/resource_heroku_app_feature.go b/builtin/providers/heroku/resource_heroku_app_feature.go
new file mode 100644
index 000000000..9718fdc67
--- /dev/null
+++ b/builtin/providers/heroku/resource_heroku_app_feature.go
@@ -0,0 +1,101 @@
+package heroku
+
+import (
+ "context"
+ "log"
+
+ heroku "github.com/cyberdelia/heroku-go/v3"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceHerokuAppFeature() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceHerokuAppFeatureCreate,
+ Update: resourceHerokuAppFeatureUpdate,
+ Read: resourceHerokuAppFeatureRead,
+ Delete: resourceHerokuAppFeatureDelete,
+
+ Schema: map[string]*schema.Schema{
+ "app": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "enabled": {
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: true,
+ },
+ },
+ }
+}
+
+func resourceHerokuAppFeatureRead(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*heroku.Service)
+
+ app, id := parseCompositeID(d.Id())
+
+ feature, err := client.AppFeatureInfo(context.TODO(), app, id)
+ if err != nil {
+ return err
+ }
+
+ d.Set("app", app)
+ d.Set("name", feature.Name)
+ d.Set("enabled", feature.Enabled)
+
+ return nil
+}
+
+func resourceHerokuAppFeatureCreate(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*heroku.Service)
+
+ app := d.Get("app").(string)
+ featureName := d.Get("name").(string)
+ enabled := d.Get("enabled").(bool)
+
+ opts := heroku.AppFeatureUpdateOpts{Enabled: enabled}
+
+ log.Printf("[DEBUG] Feature set configuration: %#v, %#v", featureName, opts)
+
+ feature, err := client.AppFeatureUpdate(context.TODO(), app, featureName, opts)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(buildCompositeID(app, feature.ID))
+
+ return resourceHerokuAppFeatureRead(d, meta)
+}
+
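+// Toggling a feature is the same PATCH call as creating one, so an update to
+// "enabled" simply re-runs the create logic.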
+func resourceHerokuAppFeatureUpdate(d *schema.ResourceData, meta interface{}) error {
+ if d.HasChange("enabled") {
+ return resourceHerokuAppFeatureCreate(d, meta)
+ }
+
+ return resourceHerokuAppFeatureRead(d, meta)
+}
+
+func resourceHerokuAppFeatureDelete(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*heroku.Service)
+
+ app, id := parseCompositeID(d.Id())
+ featureName := d.Get("name").(string)
+
+ log.Printf("[INFO] Deleting app feature %s (%s) for app %s", featureName, id, app)
+ opts := heroku.AppFeatureUpdateOpts{Enabled: false}
+ _, err := client.AppFeatureUpdate(context.TODO(), app, id, opts)
+ if err != nil {
+ return err
+ }
+
+ d.SetId("")
+ return nil
+}
diff --git a/builtin/providers/heroku/resource_heroku_app_feature_test.go b/builtin/providers/heroku/resource_heroku_app_feature_test.go
new file mode 100644
index 000000000..870216ae9
--- /dev/null
+++ b/builtin/providers/heroku/resource_heroku_app_feature_test.go
@@ -0,0 +1,135 @@
+package heroku
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ heroku "github.com/cyberdelia/heroku-go/v3"
+ "github.com/hashicorp/terraform/helper/acctest"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccHerokuAppFeature(t *testing.T) {
+ var feature heroku.AppFeatureInfoResult
+ appName := fmt.Sprintf("tftest-%s", acctest.RandString(10))
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckHerokuFeatureDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccCheckHerokuFeature_basic(appName),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckHerokuFeatureExists("heroku_app_feature.runtime_metrics", &feature),
+ testAccCheckHerokuFeatureEnabled(&feature, true),
+ resource.TestCheckResourceAttr(
+ "heroku_app_feature.runtime_metrics", "enabled", "true",
+ ),
+ ),
+ },
+ {
+ Config: testAccCheckHerokuFeature_disabled(appName),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckHerokuFeatureExists("heroku_app_feature.runtime_metrics", &feature),
+ testAccCheckHerokuFeatureEnabled(&feature, false),
+ resource.TestCheckResourceAttr(
+ "heroku_app_feature.runtime_metrics", "enabled", "false",
+ ),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckHerokuFeatureDestroy(s *terraform.State) error {
+ client := testAccProvider.Meta().(*heroku.Service)
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "heroku_app_feature" {
+ continue
+ }
+
+ _, err := client.AppFeatureInfo(context.TODO(), rs.Primary.Attributes["app"], rs.Primary.ID)
+
+ if err == nil {
+ return fmt.Errorf("Feature still exists")
+ }
+ }
+
+ return nil
+}
+
+func testAccCheckHerokuFeatureExists(n string, feature *heroku.AppFeatureInfoResult) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No feature ID is set")
+ }
+
+ app, id := parseCompositeID(rs.Primary.ID)
+ if app != rs.Primary.Attributes["app"] {
+ return fmt.Errorf("Bad app: %s", app)
+ }
+
+ client := testAccProvider.Meta().(*heroku.Service)
+
+ foundFeature, err := client.AppFeatureInfo(context.TODO(), app, id)
+ if err != nil {
+ return err
+ }
+
+ if foundFeature.ID != id {
+ return fmt.Errorf("Feature not found")
+ }
+
+ *feature = *foundFeature
+ return nil
+ }
+}
+
+func testAccCheckHerokuFeatureEnabled(feature *heroku.AppFeatureInfoResult, enabled bool) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ if feature.Enabled != enabled {
+ return fmt.Errorf("Bad enabled: %v", feature.Enabled)
+ }
+
+ return nil
+ }
+}
+
+func testAccCheckHerokuFeature_basic(appName string) string {
+ return fmt.Sprintf(`
+resource "heroku_app" "example" {
+ name = "%s"
+ region = "us"
+}
+
+resource "heroku_app_feature" "runtime_metrics" {
+ app = "${heroku_app.example.name}"
+ name = "log-runtime-metrics"
+}
+`, appName)
+}
+
+func testAccCheckHerokuFeature_disabled(appName string) string {
+ return fmt.Sprintf(`
+resource "heroku_app" "example" {
+ name = "%s"
+ region = "us"
+}
+
+resource "heroku_app_feature" "runtime_metrics" {
+ app = "${heroku_app.example.name}"
+ name = "log-runtime-metrics"
+ enabled = false
+}
+`, appName)
+}
diff --git a/builtin/providers/heroku/resource_heroku_pipeline.go b/builtin/providers/heroku/resource_heroku_pipeline.go
new file mode 100644
index 000000000..5aedf33de
--- /dev/null
+++ b/builtin/providers/heroku/resource_heroku_pipeline.go
@@ -0,0 +1,92 @@
+package heroku
+
+import (
+ "context"
+ "fmt"
+ "log"
+
+ "github.com/cyberdelia/heroku-go/v3"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceHerokuPipeline() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceHerokuPipelineCreate,
+ Update: resourceHerokuPipelineUpdate,
+ Read: resourceHerokuPipelineRead,
+ Delete: resourceHerokuPipelineDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ },
+ },
+ }
+}
+
+func resourceHerokuPipelineCreate(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*heroku.Service)
+
+ opts := heroku.PipelineCreateOpts{
+ Name: d.Get("name").(string),
+ }
+
+ log.Printf("[DEBUG] Pipeline create configuration: %#v", opts)
+
+ p, err := client.PipelineCreate(context.TODO(), opts)
+ if err != nil {
+ return fmt.Errorf("Error creating pipeline: %s", err)
+ }
+
+ d.SetId(p.ID)
+ d.Set("name", p.Name)
+
+ log.Printf("[INFO] Pipeline ID: %s", d.Id())
+
+ return resourceHerokuPipelineUpdate(d, meta)
+}
+
+func resourceHerokuPipelineUpdate(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*heroku.Service)
+
+ if d.HasChange("name") {
+ name := d.Get("name").(string)
+ opts := heroku.PipelineUpdateOpts{
+ Name: &name,
+ }
+
+ _, err := client.PipelineUpdate(context.TODO(), d.Id(), opts)
+ if err != nil {
+ return err
+ }
+ }
+
+ return resourceHerokuPipelineRead(d, meta)
+}
+
+func resourceHerokuPipelineDelete(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*heroku.Service)
+
+ log.Printf("[INFO] Deleting pipeline: %s", d.Id())
+
+ _, err := client.PipelineDelete(context.TODO(), d.Id())
+ if err != nil {
+ return fmt.Errorf("Error deleting pipeline: %s", err)
+ }
+
+ return nil
+}
+
+func resourceHerokuPipelineRead(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*heroku.Service)
+
+ p, err := client.PipelineInfo(context.TODO(), d.Id())
+ if err != nil {
+ return fmt.Errorf("Error retrieving pipeline: %s", err)
+ }
+
+ d.Set("name", p.Name)
+
+ return nil
+}
diff --git a/builtin/providers/heroku/resource_heroku_pipeline_coupling.go b/builtin/providers/heroku/resource_heroku_pipeline_coupling.go
new file mode 100644
index 000000000..90b70447a
--- /dev/null
+++ b/builtin/providers/heroku/resource_heroku_pipeline_coupling.go
@@ -0,0 +1,89 @@
+package heroku
+
+import (
+ "context"
+ "fmt"
+ "log"
+
+ "github.com/cyberdelia/heroku-go/v3"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceHerokuPipelineCoupling() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceHerokuPipelineCouplingCreate,
+ Read: resourceHerokuPipelineCouplingRead,
+ Delete: resourceHerokuPipelineCouplingDelete,
+
+ Schema: map[string]*schema.Schema{
+ "app": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "pipeline": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ ValidateFunc: validateUUID,
+ },
+ "stage": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ ValidateFunc: validatePipelineStageName,
+ },
+ },
+ }
+}
+
+func resourceHerokuPipelineCouplingCreate(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*heroku.Service)
+
+ opts := heroku.PipelineCouplingCreateOpts{
+ App: d.Get("app").(string),
+ Pipeline: d.Get("pipeline").(string),
+ Stage: d.Get("stage").(string),
+ }
+
+ log.Printf("[DEBUG] PipelineCoupling create configuration: %#v", opts)
+
+ p, err := client.PipelineCouplingCreate(context.TODO(), opts)
+ if err != nil {
+ return fmt.Errorf("Error creating pipeline: %s", err)
+ }
+
+ d.SetId(p.ID)
+
+ log.Printf("[INFO] PipelineCoupling ID: %s", d.Id())
+
+ return resourceHerokuPipelineCouplingRead(d, meta)
+}
+
+func resourceHerokuPipelineCouplingDelete(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*heroku.Service)
+
+ log.Printf("[INFO] Deleting pipeline: %s", d.Id())
+
+ _, err := client.PipelineCouplingDelete(context.TODO(), d.Id())
+ if err != nil {
+ return fmt.Errorf("Error deleting pipeline: %s", err)
+ }
+
+ return nil
+}
+
+func resourceHerokuPipelineCouplingRead(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*heroku.Service)
+
+ p, err := client.PipelineCouplingInfo(context.TODO(), d.Id())
+ if err != nil {
+ return fmt.Errorf("Error retrieving pipeline: %s", err)
+ }
+
+ d.Set("app", p.App)
+ d.Set("pipeline", p.Pipeline)
+ d.Set("stage", p.Stage)
+
+ return nil
+}
diff --git a/builtin/providers/heroku/resource_heroku_pipeline_coupling_test.go b/builtin/providers/heroku/resource_heroku_pipeline_coupling_test.go
new file mode 100644
index 000000000..6fd8b5195
--- /dev/null
+++ b/builtin/providers/heroku/resource_heroku_pipeline_coupling_test.go
@@ -0,0 +1,123 @@
+package heroku
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ heroku "github.com/cyberdelia/heroku-go/v3"
+ "github.com/hashicorp/terraform/helper/acctest"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccHerokuPipelineCoupling_Basic(t *testing.T) {
+ var coupling heroku.PipelineCouplingInfoResult
+
+ appName := fmt.Sprintf("tftest-%s", acctest.RandString(10))
+ pipelineName := fmt.Sprintf("tftest-%s", acctest.RandString(10))
+ stageName := "development"
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckHerokuPipelineCouplingDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccCheckHerokuPipelineCouplingConfig_basic(appName, pipelineName, stageName),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckHerokuPipelineCouplingExists("heroku_pipeline_coupling.default", &coupling),
+ testAccCheckHerokuPipelineCouplingAttributes(
+ &coupling,
+ "heroku_pipeline.default",
+ stageName,
+ ),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckHerokuPipelineCouplingConfig_basic(appName, pipelineName, stageName string) string {
+ return fmt.Sprintf(`
+resource "heroku_app" "default" {
+ name = "%s"
+ region = "us"
+}
+
+resource "heroku_pipeline" "default" {
+ name = "%s"
+}
+
+resource "heroku_pipeline_coupling" "default" {
+ app = "${heroku_app.default.id}"
+ pipeline = "${heroku_pipeline.default.id}"
+ stage = "%s"
+}
+`, appName, pipelineName, stageName)
+}
+
+func testAccCheckHerokuPipelineCouplingExists(n string, pipeline *heroku.PipelineCouplingInfoResult) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No coupling ID set")
+ }
+
+ client := testAccProvider.Meta().(*heroku.Service)
+
+ foundPipelineCoupling, err := client.PipelineCouplingInfo(context.TODO(), rs.Primary.ID)
+ if err != nil {
+ return err
+ }
+
+ if foundPipelineCoupling.ID != rs.Primary.ID {
+ return fmt.Errorf("PipelineCoupling not found: %s != %s", foundPipelineCoupling.ID, rs.Primary.ID)
+ }
+
+ *pipeline = *foundPipelineCoupling
+
+ return nil
+ }
+}
+
+func testAccCheckHerokuPipelineCouplingAttributes(coupling *heroku.PipelineCouplingInfoResult, pipelineResource, stageName string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ pipeline, ok := s.RootModule().Resources[pipelineResource]
+ if !ok {
+ return fmt.Errorf("Pipeline not found: %s", pipelineResource)
+ }
+
+ if coupling.Pipeline.ID != pipeline.Primary.ID {
+ return fmt.Errorf("Bad pipeline ID: %v != %v", coupling.Pipeline.ID, pipeline.Primary.ID)
+ }
+ if coupling.Stage != stageName {
+ return fmt.Errorf("Bad stage: %s", coupling.Stage)
+ }
+
+ return nil
+ }
+}
+
+func testAccCheckHerokuPipelineCouplingDestroy(s *terraform.State) error {
+ client := testAccProvider.Meta().(*heroku.Service)
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "heroku_pipeline_coupling" {
+ continue
+ }
+
+ _, err := client.PipelineCouplingInfo(context.TODO(), rs.Primary.ID)
+
+ if err == nil {
+ return fmt.Errorf("PipelineCoupling still exists")
+ }
+ }
+
+ return nil
+}
diff --git a/builtin/providers/heroku/resource_heroku_pipeline_test.go b/builtin/providers/heroku/resource_heroku_pipeline_test.go
new file mode 100644
index 000000000..1c40e1403
--- /dev/null
+++ b/builtin/providers/heroku/resource_heroku_pipeline_test.go
@@ -0,0 +1,96 @@
+package heroku
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ heroku "github.com/cyberdelia/heroku-go/v3"
+ "github.com/hashicorp/terraform/helper/acctest"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccHerokuPipeline_Basic(t *testing.T) {
+ var pipeline heroku.PipelineInfoResult
+ pipelineName := fmt.Sprintf("tftest-%s", acctest.RandString(10))
+ pipelineName2 := fmt.Sprintf("%s-2", pipelineName)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckHerokuPipelineDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccCheckHerokuPipelineConfig_basic(pipelineName),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckHerokuPipelineExists("heroku_pipeline.foobar", &pipeline),
+ resource.TestCheckResourceAttr(
+ "heroku_pipeline.foobar", "name", pipelineName),
+ ),
+ },
+ {
+ Config: testAccCheckHerokuPipelineConfig_basic(pipelineName2),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(
+ "heroku_pipeline.foobar", "name", pipelineName2),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckHerokuPipelineConfig_basic(pipelineName string) string {
+ return fmt.Sprintf(`
+resource "heroku_pipeline" "foobar" {
+ name = "%s"
+}
+`, pipelineName)
+}
+
+func testAccCheckHerokuPipelineExists(n string, pipeline *heroku.PipelineInfoResult) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No pipeline name set")
+ }
+
+ client := testAccProvider.Meta().(*heroku.Service)
+
+ foundPipeline, err := client.PipelineInfo(context.TODO(), rs.Primary.ID)
+ if err != nil {
+ return err
+ }
+
+ if foundPipeline.ID != rs.Primary.ID {
+ return fmt.Errorf("Pipeline not found")
+ }
+
+ *pipeline = *foundPipeline
+
+ return nil
+ }
+}
+
+func testAccCheckHerokuPipelineDestroy(s *terraform.State) error {
+ client := testAccProvider.Meta().(*heroku.Service)
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "heroku_pipeline" {
+ continue
+ }
+
+ _, err := client.PipelineInfo(context.TODO(), rs.Primary.ID)
+
+ if err == nil {
+ return fmt.Errorf("Pipeline still exists")
+ }
+ }
+
+ return nil
+}
diff --git a/builtin/providers/heroku/resource_heroku_space.go b/builtin/providers/heroku/resource_heroku_space.go
index 3e90fffbb..fcabee429 100644
--- a/builtin/providers/heroku/resource_heroku_space.go
+++ b/builtin/providers/heroku/resource_heroku_space.go
@@ -2,9 +2,12 @@ package heroku
import (
"context"
+ "fmt"
"log"
+ "time"
heroku "github.com/cyberdelia/heroku-go/v3"
+ "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
@@ -56,23 +59,32 @@ func resourceHerokuSpaceCreate(d *schema.ResourceData, meta interface{}) error {
d.SetId(space.ID)
log.Printf("[INFO] Space ID: %s", d.Id())
- // The type conversion here can be dropped when the vendored version of
- // heroku-go is updated.
- setSpaceAttributes(d, (*heroku.Space)(space))
- return nil
+ // Wait for the Space to be allocated
+ log.Printf("[DEBUG] Waiting for Space (%s) to be allocated", d.Id())
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"allocating"},
+ Target: []string{"allocated"},
+ Refresh: SpaceStateRefreshFunc(client, d.Id()),
+ Timeout: 20 * time.Minute,
+ }
+
+ if _, err := stateConf.WaitForState(); err != nil {
+ return fmt.Errorf("Error waiting for Space (%s) to become available: %s", d.Id(), err)
+ }
+
+ return resourceHerokuSpaceRead(d, meta)
}
func resourceHerokuSpaceRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*heroku.Service)
- space, err := client.SpaceInfo(context.TODO(), d.Id())
+ spaceRaw, _, err := SpaceStateRefreshFunc(client, d.Id())()
if err != nil {
return err
}
+ space := spaceRaw.(*heroku.Space)
- // The type conversion here can be dropped when the vendored version of
- // heroku-go is updated.
- setSpaceAttributes(d, (*heroku.Space)(space))
+ setSpaceAttributes(d, space)
return nil
}
@@ -115,3 +127,18 @@ func resourceHerokuSpaceDelete(d *schema.ResourceData, meta interface{}) error {
d.SetId("")
return nil
}
+
+// SpaceStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch
+// a Space.
+func SpaceStateRefreshFunc(client *heroku.Service, id string) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ space, err := client.SpaceInfo(context.TODO(), id)
+ if err != nil {
+ return nil, "", err
+ }
+
+ // The type conversion here can be dropped when the vendored version of
+ // heroku-go is updated.
+ return (*heroku.Space)(space), space.State, nil
+ }
+}
diff --git a/builtin/providers/heroku/validators.go b/builtin/providers/heroku/validators.go
new file mode 100644
index 000000000..0b3702247
--- /dev/null
+++ b/builtin/providers/heroku/validators.go
@@ -0,0 +1,38 @@
+package heroku
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/satori/uuid"
+)
+
+func validatePipelineStageName(v interface{}, k string) (ws []string, errors []error) {
+ validPipelineStageNames := []string{
+ "review",
+ "development",
+ "staging",
+ "production",
+ }
+
+ for _, s := range validPipelineStageNames {
+ if v == s {
+ return
+ }
+ }
+
+ err := fmt.Errorf(
+ "%s is an invalid pipeline stage, must be one of [%s]",
+ v,
+ strings.Join(validPipelineStageNames, ", "),
+ )
+ errors = append(errors, err)
+ return
+}
+
+func validateUUID(v interface{}, k string) (ws []string, errors []error) {
+ if _, err := uuid.FromString(v.(string)); err != nil {
+ errors = append(errors, fmt.Errorf("%q is an invalid UUID: %s", k, err))
+ }
+ return
+}
diff --git a/builtin/providers/heroku/validators_test.go b/builtin/providers/heroku/validators_test.go
new file mode 100644
index 000000000..6131be8bc
--- /dev/null
+++ b/builtin/providers/heroku/validators_test.go
@@ -0,0 +1,53 @@
+package heroku
+
+import "testing"
+
+func TestPipelineStage(t *testing.T) {
+ valid := []string{
+ "review",
+ "development",
+ "staging",
+ "production",
+ }
+ for _, v := range valid {
+ _, errors := validatePipelineStageName(v, "stage")
+ if len(errors) != 0 {
+ t.Fatalf("%q should be a valid stage: %q", v, errors)
+ }
+ }
+
+ invalid := []string{
+ "foobarbaz",
+ "another-stage",
+ "",
+ }
+ for _, v := range invalid {
+ _, errors := validatePipelineStageName(v, "stage")
+ if len(errors) == 0 {
+ t.Fatalf("%q should be an invalid stage", v)
+ }
+ }
+}
+
+func TestValidateUUID(t *testing.T) {
+ valid := []string{
+ "4812ccbc-2a2e-4c6c-bae4-a3d04ed51c0e",
+ }
+ for _, v := range valid {
+ _, errors := validateUUID(v, "id")
+ if len(errors) != 0 {
+ t.Fatalf("%q should be a valid UUID: %q", v, errors)
+ }
+ }
+
+ invalid := []string{
+ "foobarbaz",
+ "my-app-name",
+ }
+ for _, v := range invalid {
+ _, errors := validateUUID(v, "id")
+ if len(errors) == 0 {
+ t.Fatalf("%q should be an invalid UUID", v)
+ }
+ }
+}
diff --git a/builtin/providers/nomad/provider.go b/builtin/providers/nomad/provider.go
index d23c4ad97..61f8603bc 100644
--- a/builtin/providers/nomad/provider.go
+++ b/builtin/providers/nomad/provider.go
@@ -24,6 +24,24 @@ func Provider() terraform.ResourceProvider {
DefaultFunc: schema.EnvDefaultFunc("NOMAD_REGION", ""),
Description: "Region of the target Nomad agent.",
},
+ "ca_file": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ DefaultFunc: schema.EnvDefaultFunc("NOMAD_CACERT", ""),
+ Description: "A path to a PEM-encoded certificate authority used to verify the remote agent's certificate.",
+ },
+ "cert_file": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ DefaultFunc: schema.EnvDefaultFunc("NOMAD_CLIENT_CERT", ""),
+ Description: "A path to a PEM-encoded certificate provided to the remote agent; requires use of key_file.",
+ },
+ "key_file": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ DefaultFunc: schema.EnvDefaultFunc("NOMAD_CLIENT_KEY", ""),
+ Description: "A path to a PEM-encoded private key, required if cert_file is specified.",
+ },
},
ConfigureFunc: providerConfigure,
@@ -38,6 +56,9 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
config := api.DefaultConfig()
config.Address = d.Get("address").(string)
config.Region = d.Get("region").(string)
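+ // Pass any TLS material through to the Nomad API client; empty values
+ // leave the client's defaults untouched.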
+ config.TLSConfig.CACert = d.Get("ca_file").(string)
+ config.TLSConfig.ClientCert = d.Get("cert_file").(string)
+ config.TLSConfig.ClientKey = d.Get("key_file").(string)
client, err := api.NewClient(config)
if err != nil {
diff --git a/builtin/providers/nomad/resource_job_test.go b/builtin/providers/nomad/resource_job_test.go
index c43f5aa1f..9b4fe9f36 100644
--- a/builtin/providers/nomad/resource_job_test.go
+++ b/builtin/providers/nomad/resource_job_test.go
@@ -207,15 +207,17 @@ func testResourceJob_checkExists(s *terraform.State) error {
func testResourceJob_checkDestroy(jobID string) r.TestCheckFunc {
return func(*terraform.State) error {
client := testProvider.Meta().(*api.Client)
- _, _, err := client.Jobs().Info(jobID, nil)
- if err != nil && strings.Contains(err.Error(), "404") {
+ job, _, err := client.Jobs().Info(jobID, nil)
+ // This should likely never happen, due to how nomad caches jobs
+ if (err != nil && strings.Contains(err.Error(), "404")) || job == nil {
return nil
}
- if err == nil {
- err = errors.New("not destroyed")
+
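+ // Nomad keeps stopped jobs around as "dead" until they are
+ // garbage-collected, so check the status instead of expecting a 404.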
+ if job.Status != "dead" {
+ return fmt.Errorf("Job %q has not been stopped. Status: %s", jobID, job.Status)
}
- return err
+ return nil
}
}
@@ -284,9 +286,12 @@ func testResourceJob_updateCheck(s *terraform.State) error {
{
// Verify foo doesn't exist
- _, _, err := client.Jobs().Info("foo", nil)
- if err == nil {
- return errors.New("reading foo success")
+ job, _, err := client.Jobs().Info("foo", nil)
+ if err != nil {
+ return fmt.Errorf("error reading %q job: %s", "foo", err)
+ }
+ if job.Status != "dead" {
+ return fmt.Errorf("%q job is not dead. Status: %q", "foo", job.Status)
}
}
diff --git a/builtin/providers/postgresql/resource_postgresql_database.go b/builtin/providers/postgresql/resource_postgresql_database.go
index d236b02d7..66f59fca1 100644
--- a/builtin/providers/postgresql/resource_postgresql_database.go
+++ b/builtin/providers/postgresql/resource_postgresql_database.go
@@ -122,6 +122,12 @@ func resourcePostgreSQLDatabaseCreate(d *schema.ResourceData, meta interface{})
b := bytes.NewBufferString("CREATE DATABASE ")
fmt.Fprint(b, pq.QuoteIdentifier(dbName))
+ // Needed in order to set the owner of the DB if the connection user is not a superuser
+ err = grantRoleMembership(conn, d.Get(dbOwnerAttr).(string), c.username)
+ if err != nil {
+ return errwrap.Wrapf(fmt.Sprintf("Error granting role membership on database %s: {{err}}", dbName), err)
+ }
+
// Handle each option individually and stream results into the query
// buffer.
@@ -464,3 +470,18 @@ func doSetDBIsTemplate(conn *sql.DB, dbName string, isTemplate bool) error {
return nil
}
+
+func grantRoleMembership(conn *sql.DB, dbOwner string, connUsername string) error {
+ if dbOwner != "" && dbOwner != connUsername {
+ query := fmt.Sprintf("GRANT %s TO %s", pq.QuoteIdentifier(dbOwner), pq.QuoteIdentifier(connUsername))
+ _, err := conn.Exec(query)
+ if err != nil {
+ // The connection user is already a member of the role
+ if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
+ return nil
+ }
+ return errwrap.Wrapf("Error granting membership: {{err}}", err)
+ }
+ }
+ return nil
+}
diff --git a/builtin/providers/profitbricks/data_source_image_test.go b/builtin/providers/profitbricks/data_source_image_test.go
index 3efe6d325..3f8f151a4 100644
--- a/builtin/providers/profitbricks/data_source_image_test.go
+++ b/builtin/providers/profitbricks/data_source_image_test.go
@@ -17,7 +17,7 @@ func TestAccDataSourceImage_basic(t *testing.T) {
Config: testAccDataSourceProfitBricksImage_basic,
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("data.profitbricks_image.img", "location", "us/las"),
- resource.TestCheckResourceAttr("data.profitbricks_image.img", "name", "Ubuntu-16.04-LTS-server-2017-02-01"),
+ resource.TestCheckResourceAttr("data.profitbricks_image.img", "name", "Ubuntu-16.04-LTS-server-2017-05-01"),
resource.TestCheckResourceAttr("data.profitbricks_image.img", "type", "HDD"),
),
},
diff --git a/builtin/providers/profitbricks/resource_profitbricks_server.go b/builtin/providers/profitbricks/resource_profitbricks_server.go
index bfcd1678a..c617f691d 100644
--- a/builtin/providers/profitbricks/resource_profitbricks_server.go
+++ b/builtin/providers/profitbricks/resource_profitbricks_server.go
@@ -254,34 +254,38 @@ func resourceProfitBricksServerCreate(d *schema.ResourceData, meta interface{})
var sshkey_path []interface{}
var image, licenceType, availabilityZone string
- if !IsValidUUID(rawMap["image_name"].(string)) {
- if rawMap["image_name"] != nil {
- image = getImageId(d.Get("datacenter_id").(string), rawMap["image_name"].(string), rawMap["disk_type"].(string))
- if image == "" {
- dc := profitbricks.GetDatacenter(d.Get("datacenter_id").(string))
- return fmt.Errorf("Image '%s' doesn't exist. in location %s", rawMap["image_name"], dc.Properties.Location)
-
- }
- }
- } else {
- image = rawMap["image_name"].(string)
- }
-
- if rawMap["licence_type"] != nil {
- licenceType = rawMap["licence_type"].(string)
- }
-
if rawMap["image_password"] != nil {
imagePassword = rawMap["image_password"].(string)
}
if rawMap["ssh_key_path"] != nil {
sshkey_path = rawMap["ssh_key_path"].([]interface{})
}
- if rawMap["image_name"] != nil {
+
+ image_name := rawMap["image_name"].(string)
+ if !IsValidUUID(image_name) {
if imagePassword == "" && len(sshkey_path) == 0 {
- return fmt.Errorf("Either 'image_password' or 'ssh_key_path' must be provided.")
+ return fmt.Errorf("Either 'image_password' or 'sshkey' must be provided.")
+ }
+ image = getImageId(d.Get("datacenter_id").(string), image_name, rawMap["disk_type"].(string))
+ } else {
+ img := profitbricks.GetImage(image_name)
+ if img.StatusCode > 299 {
+ return fmt.Errorf("Error fetching image: %s", img.Response)
+ }
+		if img.Properties.Public {
+ if imagePassword == "" && len(sshkey_path) == 0 {
+ return fmt.Errorf("Either 'image_password' or 'sshkey' must be provided.")
+ }
+ image = image_name
+ } else {
+ image = image_name
}
}
+
+ if rawMap["licence_type"] != nil {
+ licenceType = rawMap["licence_type"].(string)
+ }
+
var publicKeys []string
if len(sshkey_path) != 0 {
for _, path := range sshkey_path {
diff --git a/builtin/providers/profitbricks/resource_profitbricks_volume.go b/builtin/providers/profitbricks/resource_profitbricks_volume.go
index 8fca17854..46d8ff47d 100644
--- a/builtin/providers/profitbricks/resource_profitbricks_volume.go
+++ b/builtin/providers/profitbricks/resource_profitbricks_volume.go
@@ -77,12 +77,6 @@ func resourceProfitBricksVolumeCreate(d *schema.ResourceData, meta interface{})
ssh_keypath = d.Get("ssh_key_path").([]interface{})
image_name := d.Get("image_name").(string)
- if image_name != "" {
- if imagePassword == "" && len(ssh_keypath) == 0 {
- return fmt.Errorf("Either 'image_password' or 'sshkey' must be provided.")
- }
- }
-
licenceType := d.Get("licence_type").(string)
if image_name == "" && licenceType == "" {
@@ -102,10 +96,26 @@ func resourceProfitBricksVolumeCreate(d *schema.ResourceData, meta interface{})
}
var image string
- if !IsValidUUID(image_name) {
- image = getImageId(d.Get("datacenter_id").(string), image_name, d.Get("disk_type").(string))
- } else {
- image = image_name
+ if image_name != "" {
+ if !IsValidUUID(image_name) {
+ if imagePassword == "" && len(ssh_keypath) == 0 {
+ return fmt.Errorf("Either 'image_password' or 'sshkey' must be provided.")
+ }
+ image = getImageId(d.Get("datacenter_id").(string), image_name, d.Get("disk_type").(string))
+ } else {
+ img := profitbricks.GetImage(image_name)
+ if img.StatusCode > 299 {
+ return fmt.Errorf("Error fetching image: %s", img.Response)
+ }
+			if img.Properties.Public {
+ if imagePassword == "" && len(ssh_keypath) == 0 {
+ return fmt.Errorf("Either 'image_password' or 'sshkey' must be provided.")
+ }
+ image = image_name
+ } else {
+ image = image_name
+ }
+ }
}
volume := profitbricks.Volume{
diff --git a/builtin/providers/test/data_source_test.go b/builtin/providers/test/data_source_test.go
index 3f4e5ada6..77e235f3d 100644
--- a/builtin/providers/test/data_source_test.go
+++ b/builtin/providers/test/data_source_test.go
@@ -99,3 +99,59 @@ resource "test_resource" "foo" {
},
})
}
+
+// TestDataSource_dataSourceCountGrandChild tests that a grandchild data source
+// that is based on count works, i.e. the dependency chain foo -> bar -> baz.
+// This was failing because CountBoundaryTransformer is being run during apply
+// instead of plan, which meant that it wasn't firing after data sources were
+// potentially changing state and causing diff/interpolation issues.
+//
+// This happens after the initial apply, after state is saved.
+func TestDataSource_dataSourceCountGrandChild(t *testing.T) {
+ resource.UnitTest(t, resource.TestCase{
+ Providers: testAccProviders,
+ CheckDestroy: func(s *terraform.State) error {
+ return nil
+ },
+ Steps: []resource.TestStep{
+ {
+ Config: dataSourceCountGrandChildConfig,
+ },
+ {
+ Config: dataSourceCountGrandChildConfig,
+ Check: func(s *terraform.State) error {
+ for _, v := range []string{"foo", "bar", "baz"} {
+ count := 0
+ for k := range s.RootModule().Resources {
+ if strings.HasPrefix(k, fmt.Sprintf("data.test_data_source.%s.", v)) {
+ count++
+ }
+ }
+
+ if count != 2 {
+ return fmt.Errorf("bad count for data.test_data_source.%s: %d", v, count)
+ }
+ }
+ return nil
+ },
+ },
+ },
+ })
+}
+
+const dataSourceCountGrandChildConfig = `
+data "test_data_source" "foo" {
+ count = 2
+ input = "one"
+}
+
+data "test_data_source" "bar" {
+ count = "${length(data.test_data_source.foo.*.id)}"
+ input = "${data.test_data_source.foo.*.output[count.index]}"
+}
+
+data "test_data_source" "baz" {
+ count = "${length(data.test_data_source.bar.*.id)}"
+ input = "${data.test_data_source.bar.*.output[count.index]}"
+}
+`
diff --git a/builtin/providers/triton/provider.go b/builtin/providers/triton/provider.go
index 4c21722c2..8a56b5dc4 100644
--- a/builtin/providers/triton/provider.go
+++ b/builtin/providers/triton/provider.go
@@ -42,6 +42,12 @@ func Provider() terraform.ResourceProvider {
Required: true,
DefaultFunc: schema.MultiEnvDefaultFunc([]string{"TRITON_KEY_ID", "SDC_KEY_ID"}, ""),
},
+
+ "insecure_skip_tls_verify": {
+ Type: schema.TypeBool,
+ Optional: true,
+ DefaultFunc: schema.EnvDefaultFunc("TRITON_SKIP_TLS_VERIFY", ""),
+ },
},
ResourcesMap: map[string]*schema.Resource{
@@ -56,10 +62,11 @@ func Provider() terraform.ResourceProvider {
}
type Config struct {
- Account string
- KeyMaterial string
- KeyID string
- URL string
+ Account string
+ KeyMaterial string
+ KeyID string
+ URL string
+ InsecureSkipTLSVerify bool
}
func (c Config) validate() error {
@@ -98,6 +105,10 @@ func (c Config) getTritonClient() (*triton.Client, error) {
return nil, errwrap.Wrapf("Error Creating Triton Client: {{err}}", err)
}
+ if c.InsecureSkipTLSVerify {
+ client.InsecureSkipTLSVerify()
+ }
+
return client, nil
}
@@ -106,6 +117,8 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
Account: d.Get("account").(string),
URL: d.Get("url").(string),
KeyID: d.Get("key_id").(string),
+
+ InsecureSkipTLSVerify: d.Get("insecure_skip_tls_verify").(bool),
}
if keyMaterial, ok := d.GetOk("key_material"); ok {
diff --git a/builtin/providers/triton/resource_machine.go b/builtin/providers/triton/resource_machine.go
index 7eb66591c..85b26a8a5 100644
--- a/builtin/providers/triton/resource_machine.go
+++ b/builtin/providers/triton/resource_machine.go
@@ -23,6 +23,7 @@ var (
"user_script": "user-script",
"user_data": "user-data",
"administrator_pw": "administrator-pw",
+ "cloud_config": "cloud-init:user-data",
}
)
@@ -182,6 +183,12 @@ func resourceMachine() *schema.Resource {
Optional: true,
Computed: true,
},
+ "cloud_config": {
+ Description: "copied to machine on boot",
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
"user_data": {
Description: "Data copied to machine on boot",
Type: schema.TypeString,
diff --git a/builtin/providers/vault/provider.go b/builtin/providers/vault/provider.go
index ceebd4acf..d9c7719e9 100644
--- a/builtin/providers/vault/provider.go
+++ b/builtin/providers/vault/provider.go
@@ -87,6 +87,7 @@ func Provider() terraform.ResourceProvider {
},
ResourcesMap: map[string]*schema.Resource{
+ "vault_auth_backend": authBackendResource(),
"vault_generic_secret": genericSecretResource(),
"vault_policy": policyResource(),
},
diff --git a/builtin/providers/vault/resource_auth_backend.go b/builtin/providers/vault/resource_auth_backend.go
new file mode 100644
index 000000000..800155040
--- /dev/null
+++ b/builtin/providers/vault/resource_auth_backend.go
@@ -0,0 +1,121 @@
+package vault
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/vault/api"
+)
+
+func authBackendResource() *schema.Resource {
+ return &schema.Resource{
+ Create: authBackendWrite,
+ Delete: authBackendDelete,
+ Read: authBackendRead,
+
+ Schema: map[string]*schema.Schema{
+ "type": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ Description: "Name of the auth backend",
+ },
+
+ "path": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ ForceNew: true,
+ Description: "path to mount the backend. This defaults to the type.",
+ ValidateFunc: func(v interface{}, k string) (ws []string, errs []error) {
+ value := v.(string)
+ if strings.HasSuffix(value, "/") {
+ errs = append(errs, errors.New("cannot write to a path ending in '/'"))
+ }
+ return
+ },
+ },
+
+ "description": &schema.Schema{
+ Type: schema.TypeString,
+ ForceNew: true,
+ Optional: true,
+ Description: "The description of the auth backend",
+ },
+ },
+ }
+}
+
+func authBackendWrite(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*api.Client)
+
+ name := d.Get("type").(string)
+ desc := d.Get("description").(string)
+ path := d.Get("path").(string)
+
+ log.Printf("[DEBUG] Writing auth %s to Vault", name)
+
+ var err error
+
+ if path == "" {
+ path = name
+ err = d.Set("path", name)
+ if err != nil {
+ return fmt.Errorf("unable to set state: %s", err)
+ }
+ }
+
+ err = client.Sys().EnableAuth(path, name, desc)
+
+ if err != nil {
+ return fmt.Errorf("error writing to Vault: %s", err)
+ }
+
+ d.SetId(name)
+
+ return nil
+}
+
+func authBackendDelete(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*api.Client)
+
+ name := d.Id()
+
+ log.Printf("[DEBUG] Deleting auth %s from Vault", name)
+
+ err := client.Sys().DisableAuth(name)
+
+ if err != nil {
+ return fmt.Errorf("error disabling auth from Vault: %s", err)
+ }
+
+ return nil
+}
+
+func authBackendRead(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*api.Client)
+
+ name := d.Id()
+
+ auths, err := client.Sys().ListAuth()
+
+ if err != nil {
+ return fmt.Errorf("error reading from Vault: %s", err)
+ }
+
+	configuredPath := d.Get("path").(string) + "/"
+
+	for path, auth := range auths {
+		if auth.Type == name && path == configuredPath {
+			return nil
+		}
+	}
+
+ // If we fell out here then we didn't find our Auth in the list.
+ d.SetId("")
+ return nil
+}
diff --git a/builtin/providers/vault/resource_auth_backend_test.go b/builtin/providers/vault/resource_auth_backend_test.go
new file mode 100644
index 000000000..344eafbd5
--- /dev/null
+++ b/builtin/providers/vault/resource_auth_backend_test.go
@@ -0,0 +1,129 @@
+package vault
+
+import (
+ "fmt"
+ "testing"
+
+ r "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/hashicorp/vault/api"
+)
+
+func TestResourceAuth(t *testing.T) {
+ r.Test(t, r.TestCase{
+ Providers: testProviders,
+ PreCheck: func() { testAccPreCheck(t) },
+ Steps: []r.TestStep{
+ r.TestStep{
+ Config: testResourceAuth_initialConfig,
+ Check: testResourceAuth_initialCheck,
+ },
+ r.TestStep{
+ Config: testResourceAuth_updateConfig,
+ Check: testResourceAuth_updateCheck,
+ },
+ },
+ })
+}
+
+var testResourceAuth_initialConfig = `
+
+resource "vault_auth_backend" "test" {
+ type = "github"
+}
+
+`
+
+func testResourceAuth_initialCheck(s *terraform.State) error {
+ resourceState := s.Modules[0].Resources["vault_auth_backend.test"]
+ if resourceState == nil {
+ return fmt.Errorf("resource not found in state")
+ }
+
+ instanceState := resourceState.Primary
+ if instanceState == nil {
+ return fmt.Errorf("resource has no primary instance")
+ }
+
+ name := instanceState.ID
+
+ if name != instanceState.Attributes["type"] {
+ return fmt.Errorf("id doesn't match name")
+ }
+
+ if name != "github" {
+ return fmt.Errorf("unexpected auth name %s", name)
+ }
+
+ client := testProvider.Meta().(*api.Client)
+ auths, err := client.Sys().ListAuth()
+
+ if err != nil {
+ return fmt.Errorf("error reading back auth: %s", err)
+ }
+
+ found := false
+ for _, auth := range auths {
+ if auth.Type == name {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return fmt.Errorf("could not find auth backend %s in %+v", name, auths)
+ }
+
+ return nil
+}
+
+var testResourceAuth_updateConfig = `
+
+resource "vault_auth_backend" "test" {
+ type = "ldap"
+}
+
+`
+
+func testResourceAuth_updateCheck(s *terraform.State) error {
+ resourceState := s.Modules[0].Resources["vault_auth_backend.test"]
+ if resourceState == nil {
+ return fmt.Errorf("resource not found in state")
+ }
+
+ instanceState := resourceState.Primary
+ if instanceState == nil {
+ return fmt.Errorf("resource has no primary instance")
+ }
+
+ name := instanceState.ID
+
+ if name != instanceState.Attributes["type"] {
+ return fmt.Errorf("id doesn't match name")
+ }
+
+ if name != "ldap" {
+ return fmt.Errorf("unexpected auth name")
+ }
+
+ client := testProvider.Meta().(*api.Client)
+ auths, err := client.Sys().ListAuth()
+
+ if err != nil {
+ return fmt.Errorf("error reading back auth: %s", err)
+ }
+
+ found := false
+ for _, auth := range auths {
+ if auth.Type == name {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return fmt.Errorf("could not find auth backend %s in %+v", name, auths)
+ }
+
+ return nil
+}
diff --git a/builtin/provisioners/remote-exec/resource_provisioner.go b/builtin/provisioners/remote-exec/resource_provisioner.go
index 248ce5d5f..7dd86daf0 100644
--- a/builtin/provisioners/remote-exec/resource_provisioner.go
+++ b/builtin/provisioners/remote-exec/resource_provisioner.go
@@ -22,7 +22,7 @@ import (
func Provisioner() terraform.ResourceProvisioner {
return &schema.Provisioner{
Schema: map[string]*schema.Schema{
- "inline": &schema.Schema{
+ "inline": {
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeString},
PromoteSingle: true,
@@ -30,13 +30,13 @@ func Provisioner() terraform.ResourceProvisioner {
ConflictsWith: []string{"script", "scripts"},
},
- "script": &schema.Schema{
+ "script": {
Type: schema.TypeString,
Optional: true,
ConflictsWith: []string{"inline", "scripts"},
},
- "scripts": &schema.Schema{
+ "scripts": {
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeString},
Optional: true,
@@ -81,7 +81,11 @@ func applyFn(ctx context.Context) error {
func generateScripts(d *schema.ResourceData) ([]string, error) {
var lines []string
for _, l := range d.Get("inline").([]interface{}) {
- lines = append(lines, l.(string))
+ line, ok := l.(string)
+ if !ok {
+ return nil, fmt.Errorf("Error parsing %v as a string", l)
+ }
+ lines = append(lines, line)
}
lines = append(lines, "")
@@ -109,12 +113,20 @@ func collectScripts(d *schema.ResourceData) ([]io.ReadCloser, error) {
// Collect scripts
var scripts []string
if script, ok := d.GetOk("script"); ok {
- scripts = append(scripts, script.(string))
+ scr, ok := script.(string)
+ if !ok {
+ return nil, fmt.Errorf("Error parsing script %v as string", script)
+ }
+ scripts = append(scripts, scr)
}
if scriptList, ok := d.GetOk("scripts"); ok {
for _, script := range scriptList.([]interface{}) {
- scripts = append(scripts, script.(string))
+ scr, ok := script.(string)
+ if !ok {
+ return nil, fmt.Errorf("Error parsing script %v as string", script)
+ }
+ scripts = append(scripts, scr)
}
}
diff --git a/builtin/provisioners/remote-exec/resource_provisioner_test.go b/builtin/provisioners/remote-exec/resource_provisioner_test.go
index 69e5e9cdf..aa69cad61 100644
--- a/builtin/provisioners/remote-exec/resource_provisioner_test.go
+++ b/builtin/provisioners/remote-exec/resource_provisioner_test.go
@@ -9,6 +9,8 @@ import (
"testing"
"time"
+ "strings"
+
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
@@ -71,6 +73,23 @@ func TestResourceProvider_generateScript(t *testing.T) {
}
}
+func TestResourceProvider_generateScriptEmptyInline(t *testing.T) {
+ p := Provisioner().(*schema.Provisioner)
+ conf := map[string]interface{}{
+ "inline": []interface{}{""},
+ }
+
+ _, err := generateScripts(schema.TestResourceDataRaw(
+ t, p.Schema, conf))
+ if err == nil {
+ t.Fatal("expected error, got none")
+ }
+
+ if !strings.Contains(err.Error(), "Error parsing") {
+ t.Fatalf("expected parsing error, got: %s", err)
+ }
+}
+
func TestResourceProvider_CollectScripts_inline(t *testing.T) {
p := Provisioner().(*schema.Provisioner)
conf := map[string]interface{}{
@@ -162,6 +181,24 @@ func TestResourceProvider_CollectScripts_scripts(t *testing.T) {
}
}
+func TestResourceProvider_CollectScripts_scriptsEmpty(t *testing.T) {
+ p := Provisioner().(*schema.Provisioner)
+ conf := map[string]interface{}{
+ "scripts": []interface{}{""},
+ }
+
+ _, err := collectScripts(schema.TestResourceDataRaw(
+ t, p.Schema, conf))
+
+ if err == nil {
+ t.Fatal("expected error")
+ }
+
+ if !strings.Contains(err.Error(), "Error parsing") {
+ t.Fatalf("Expected parsing error, got: %s", err)
+ }
+}
+
func TestRetryFunc(t *testing.T) {
// succeed on the third try
errs := []error{io.EOF, &net.OpError{Err: errors.New("ERROR")}, nil}
diff --git a/command/env_command.go b/command/env_command.go
index 9548122a1..f29c9456f 100644
--- a/command/env_command.go
+++ b/command/env_command.go
@@ -57,7 +57,7 @@ const (
envDoesNotExist = `
Environment %q doesn't exist!
-You can create this environment with the "-new" option.`
+You can create this environment with the "new" option.`
envChanged = `[reset][green]Switched to environment %q!`
diff --git a/command/hook_count.go b/command/hook_count.go
index 150ae438e..127284d34 100644
--- a/command/hook_count.go
+++ b/command/hook_count.go
@@ -42,6 +42,10 @@ func (h *CountHook) PreApply(
h.Lock()
defer h.Unlock()
+	// If the diff is empty, there is nothing to apply and nothing to count.
+	if d.Empty() {
+		return terraform.HookActionContinue, nil
+	}
+
if h.pending == nil {
h.pending = make(map[string]countHookAction)
}
diff --git a/command/hook_ui.go b/command/hook_ui.go
index 8d8f4539e..a53edfa38 100644
--- a/command/hook_ui.go
+++ b/command/hook_ui.go
@@ -59,6 +59,11 @@ func (h *UiHook) PreApply(
d *terraform.InstanceDiff) (terraform.HookAction, error) {
h.once.Do(h.init)
+ // if there's no diff, there's nothing to output
+ if d.Empty() {
+ return terraform.HookActionContinue, nil
+ }
+
id := n.HumanId()
op := uiResourceModify
diff --git a/config/interpolate_funcs.go b/config/interpolate_funcs.go
index b79334718..4962d4f37 100644
--- a/config/interpolate_funcs.go
+++ b/config/interpolate_funcs.go
@@ -4,6 +4,7 @@ import (
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
+ "crypto/sha512"
"encoding/base64"
"encoding/hex"
"encoding/json"
@@ -57,6 +58,7 @@ func Funcs() map[string]ast.Function {
"base64decode": interpolationFuncBase64Decode(),
"base64encode": interpolationFuncBase64Encode(),
"base64sha256": interpolationFuncBase64Sha256(),
+ "base64sha512": interpolationFuncBase64Sha512(),
"ceil": interpolationFuncCeil(),
"chomp": interpolationFuncChomp(),
"cidrhost": interpolationFuncCidrHost(),
@@ -90,6 +92,7 @@ func Funcs() map[string]ast.Function {
"replace": interpolationFuncReplace(),
"sha1": interpolationFuncSha1(),
"sha256": interpolationFuncSha256(),
+ "sha512": interpolationFuncSha512(),
"signum": interpolationFuncSignum(),
"slice": interpolationFuncSlice(),
"sort": interpolationFuncSort(),
@@ -1240,6 +1243,20 @@ func interpolationFuncSha256() ast.Function {
}
}
+func interpolationFuncSha512() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ s := args[0].(string)
+ h := sha512.New()
+ h.Write([]byte(s))
+ hash := hex.EncodeToString(h.Sum(nil))
+ return hash, nil
+ },
+ }
+}
+
func interpolationFuncTrimSpace() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeString},
@@ -1266,6 +1283,21 @@ func interpolationFuncBase64Sha256() ast.Function {
}
}
+func interpolationFuncBase64Sha512() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ s := args[0].(string)
+ h := sha512.New()
+ h.Write([]byte(s))
+ shaSum := h.Sum(nil)
+ encoded := base64.StdEncoding.EncodeToString(shaSum[:])
+ return encoded, nil
+ },
+ }
+}
+
func interpolationFuncUUID() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{},
diff --git a/config/interpolate_funcs_test.go b/config/interpolate_funcs_test.go
index 57e59afa6..b0c7bafff 100644
--- a/config/interpolate_funcs_test.go
+++ b/config/interpolate_funcs_test.go
@@ -2070,6 +2070,18 @@ func TestInterpolateFuncSha256(t *testing.T) {
})
}
+func TestInterpolateFuncSha512(t *testing.T) {
+ testFunction(t, testFunctionConfig{
+ Cases: []testFunctionCase{
+ {
+ `${sha512("test")}`,
+ "ee26b0dd4af7e749aa1a8ee3c10ae9923f618980772e473f8819a5d4940e0db27ac185f8a0e1d5f84f88bc887fd67b143732c304cc5fa9ad8e6f57f50028a8ff",
+ false,
+ },
+ },
+ })
+}
+
func TestInterpolateFuncTitle(t *testing.T) {
testFunction(t, testFunctionConfig{
Cases: []testFunctionCase{
@@ -2129,6 +2141,23 @@ func TestInterpolateFuncBase64Sha256(t *testing.T) {
})
}
+func TestInterpolateFuncBase64Sha512(t *testing.T) {
+ testFunction(t, testFunctionConfig{
+ Cases: []testFunctionCase{
+ {
+ `${base64sha512("test")}`,
+ "7iaw3Ur350mqGo7jwQrpkj9hiYB3Lkc/iBml1JQODbJ6wYX4oOHV+E+IvIh/1nsUNzLDBMxfqa2Ob1f1ACio/w==",
+ false,
+ },
+			{ // This will differ because we're base64-encoding the hex representation, not the raw bytes
+ `${base64encode(sha512("test"))}`,
+ "ZWUyNmIwZGQ0YWY3ZTc0OWFhMWE4ZWUzYzEwYWU5OTIzZjYxODk4MDc3MmU0NzNmODgxOWE1ZDQ5NDBlMGRiMjdhYzE4NWY4YTBlMWQ1Zjg0Zjg4YmM4ODdmZDY3YjE0MzczMmMzMDRjYzVmYTlhZDhlNmY1N2Y1MDAyOGE4ZmY=",
+ false,
+ },
+ },
+ })
+}
+
func TestInterpolateFuncMd5(t *testing.T) {
testFunction(t, testFunctionConfig{
Cases: []testFunctionCase{
diff --git a/config/loader_hcl.go b/config/loader_hcl.go
index a40ad5ba7..9abb1960f 100644
--- a/config/loader_hcl.go
+++ b/config/loader_hcl.go
@@ -327,6 +327,10 @@ func loadAtlasHcl(list *ast.ObjectList) (*AtlasConfig, error) {
// represents exactly one module definition in the HCL configuration.
// We leave it up to another pass to merge them together.
func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
+ if err := assertAllBlocksHaveNames("module", list); err != nil {
+ return nil, err
+ }
+
list = list.Children()
if len(list.Items) == 0 {
return nil, nil
@@ -391,12 +395,12 @@ func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
// LoadOutputsHcl recurses into the given HCL object and turns
// it into a mapping of outputs.
func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) {
- list = list.Children()
- if len(list.Items) == 0 {
- return nil, fmt.Errorf(
- "'output' must be followed by exactly one string: a name")
+ if err := assertAllBlocksHaveNames("output", list); err != nil {
+ return nil, err
}
+ list = list.Children()
+
// Go through each object and turn it into an actual result.
result := make([]*Output, 0, len(list.Items))
for _, item := range list.Items {
@@ -450,12 +454,12 @@ func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) {
// LoadVariablesHcl recurses into the given HCL object and turns
// it into a list of variables.
func loadVariablesHcl(list *ast.ObjectList) ([]*Variable, error) {
- list = list.Children()
- if len(list.Items) == 0 {
- return nil, fmt.Errorf(
- "'variable' must be followed by exactly one strings: a name")
+ if err := assertAllBlocksHaveNames("variable", list); err != nil {
+ return nil, err
}
+ list = list.Children()
+
// hclVariable is the structure each variable is decoded into
type hclVariable struct {
DeclaredType string `hcl:"type"`
@@ -531,6 +535,10 @@ func loadVariablesHcl(list *ast.ObjectList) ([]*Variable, error) {
// LoadProvidersHcl recurses into the given HCL object and turns
// it into a mapping of provider configs.
func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) {
+ if err := assertAllBlocksHaveNames("provider", list); err != nil {
+ return nil, err
+ }
+
list = list.Children()
if len(list.Items) == 0 {
return nil, nil
@@ -592,6 +600,10 @@ func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) {
// represents exactly one data definition in the HCL configuration.
// We leave it up to another pass to merge them together.
func loadDataResourcesHcl(list *ast.ObjectList) ([]*Resource, error) {
+ if err := assertAllBlocksHaveNames("data", list); err != nil {
+ return nil, err
+ }
+
list = list.Children()
if len(list.Items) == 0 {
return nil, nil
@@ -901,6 +913,10 @@ func loadManagedResourcesHcl(list *ast.ObjectList) ([]*Resource, error) {
}
func loadProvisionersHcl(list *ast.ObjectList, connInfo map[string]interface{}) ([]*Provisioner, error) {
+ if err := assertAllBlocksHaveNames("provisioner", list); err != nil {
+ return nil, err
+ }
+
list = list.Children()
if len(list.Items) == 0 {
return nil, nil
@@ -1023,6 +1039,29 @@ func hclObjectMap(os *hclobj.Object) map[string]ast.ListNode {
}
*/
+// assertAllBlocksHaveNames returns an error if any of the items in
+// the given object list are blocks without keys (like "module {}")
+// or simple assignments (like "module = 1"). It returns nil if
+// neither of these things is true.
+//
+// The given name is used in any generated error messages, and should
+// be the name of the block we're dealing with. The given list should
+// be the result of calling .Filter on an object list with that same
+// name.
+func assertAllBlocksHaveNames(name string, list *ast.ObjectList) error {
+ if elem := list.Elem(); len(elem.Items) != 0 {
+ switch et := elem.Items[0].Val.(type) {
+ case *ast.ObjectType:
+ pos := et.Lbrace
+ return fmt.Errorf("%s: %q must be followed by a name", pos, name)
+ default:
+ pos := elem.Items[0].Val.Pos()
+ return fmt.Errorf("%s: %q must be a configuration block", pos, name)
+ }
+ }
+ return nil
+}
+
func checkHCLKeys(node ast.Node, valid []string) error {
var list *ast.ObjectList
switch n := node.(type) {
diff --git a/config/loader_test.go b/config/loader_test.go
index a2a2929f2..a3aeb7321 100644
--- a/config/loader_test.go
+++ b/config/loader_test.go
@@ -314,6 +314,18 @@ func TestLoadFileBasic_modules(t *testing.T) {
}
}
+func TestLoadFile_unnamedModule(t *testing.T) {
+ _, err := LoadFile(filepath.Join(fixtureDir, "module-unnamed.tf"))
+ if err == nil {
+ t.Fatalf("bad: expected error")
+ }
+
+ errorStr := err.Error()
+ if !strings.Contains(errorStr, `"module" must be followed`) {
+ t.Fatalf("bad: expected error has wrong text: %s", errorStr)
+ }
+}
+
func TestLoadFile_outputDependsOn(t *testing.T) {
c, err := LoadFile(filepath.Join(fixtureDir, "output-depends-on.tf"))
if err != nil {
@@ -696,7 +708,7 @@ func TestLoadFile_variableNoName(t *testing.T) {
}
errorStr := err.Error()
- if !strings.Contains(errorStr, "'variable' must be followed") {
+ if !strings.Contains(errorStr, `"variable" must be followed`) {
t.Fatalf("bad: expected error has wrong text: %s", errorStr)
}
}
@@ -740,7 +752,7 @@ func TestLoadFile_unnamedOutput(t *testing.T) {
}
errorStr := err.Error()
- if !strings.Contains(errorStr, "'output' must be followed") {
+ if !strings.Contains(errorStr, `"output" must be followed`) {
t.Fatalf("bad: expected error has wrong text: %s", errorStr)
}
}
diff --git a/config/test-fixtures/module-unnamed.tf b/config/test-fixtures/module-unnamed.tf
new file mode 100644
index 000000000..e285519bf
--- /dev/null
+++ b/config/test-fixtures/module-unnamed.tf
@@ -0,0 +1,7 @@
+module "okay" {
+ source = "./okay"
+}
+
+module {
+ source = "./not-okay"
+}
diff --git a/config/test-fixtures/output-unnamed.tf b/config/test-fixtures/output-unnamed.tf
index 7e7529153..7ef8ebe1b 100644
--- a/config/test-fixtures/output-unnamed.tf
+++ b/config/test-fixtures/output-unnamed.tf
@@ -1,3 +1,7 @@
+output "okay" {
+ value = "bar"
+}
+
output {
value = "foo"
}
diff --git a/config/test-fixtures/variable-no-name.tf b/config/test-fixtures/variable-no-name.tf
index f3856886f..7f09d1e64 100644
--- a/config/test-fixtures/variable-no-name.tf
+++ b/config/test-fixtures/variable-no-name.tf
@@ -1,3 +1,6 @@
+variable "okay" {
+}
+
variable {
name = "test"
default = "test_value"
diff --git a/examples/azure-vm-simple-linux-managed-disk/README.md b/examples/azure-vm-simple-linux-managed-disk/README.md
new file mode 100644
index 000000000..b8d0706a1
--- /dev/null
+++ b/examples/azure-vm-simple-linux-managed-disk/README.md
@@ -0,0 +1,20 @@
+# Very simple deployment of a Linux VM
+
+This template deploys a simple Linux VM with a choice of Ubuntu versions, always using the latest patched image. It deploys an A0-size VM in the resource group location and returns the FQDN of the VM.
+
+The template requires only a minimal set of parameters.
+
+## main.tf
+The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
+
+## outputs.tf
+These values are output when `terraform apply` is run and can be queried using the `terraform output` command.
+
+## provider.tf
+Azure requires that an application be added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). See the [Azure provider documentation](https://www.terraform.io/docs/providers/azurerm/) for full instructions on creating these credentials and populating your `provider.tf` file.
+
+## terraform.tfvars
+If a `terraform.tfvars` file is present in the current directory, Terraform automatically loads it to populate variables. We don't recommend saving usernames and passwords to version control; instead, create a local secret variables file and use `-var-file` to load it. A sample set of values is sketched at the end of this README.
+
+## variables.tf
+The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
diff --git a/examples/azure-vm-simple-linux-managed-disk/deploy.ci.sh b/examples/azure-vm-simple-linux-managed-disk/deploy.ci.sh
new file mode 100755
index 000000000..03e56e00a
--- /dev/null
+++ b/examples/azure-vm-simple-linux-managed-disk/deploy.ci.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+set -o errexit -o nounset
+
+docker run --rm -it \
+ -e ARM_CLIENT_ID \
+ -e ARM_CLIENT_SECRET \
+ -e ARM_SUBSCRIPTION_ID \
+ -e ARM_TENANT_ID \
+  -v "$(pwd)":/data \
+ --workdir=/data \
+ --entrypoint "/bin/sh" \
+ hashicorp/terraform:light \
+ -c "/bin/terraform get; \
+ /bin/terraform validate; \
+ /bin/terraform plan -out=out.tfplan -var dns_name=$KEY -var hostname=$KEY -var resource_group=$KEY -var admin_password=$PASSWORD; \
+ /bin/terraform apply out.tfplan; \
+ /bin/terraform show;"
+
+# cleanup deployed azure resources via azure-cli
+docker run --rm -it \
+ azuresdk/azure-cli-python \
+ sh -c "az login --service-principal -u $ARM_CLIENT_ID -p $ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID > /dev/null; \
+ az vm show -g $KEY -n rgvm"
+
+# cleanup deployed azure resources via terraform
+docker run --rm -it \
+ -e ARM_CLIENT_ID \
+ -e ARM_CLIENT_SECRET \
+ -e ARM_SUBSCRIPTION_ID \
+ -e ARM_TENANT_ID \
+  -v "$(pwd)":/data \
+ --workdir=/data \
+ --entrypoint "/bin/sh" \
+ hashicorp/terraform:light \
+ -c "/bin/terraform destroy -force -var dns_name=$KEY -var hostname=$KEY -var resource_group=$KEY -var admin_password=$PASSWORD;"
diff --git a/examples/azure-vm-simple-linux-managed-disk/deploy.mac.sh b/examples/azure-vm-simple-linux-managed-disk/deploy.mac.sh
new file mode 100755
index 000000000..9c6563f07
--- /dev/null
+++ b/examples/azure-vm-simple-linux-managed-disk/deploy.mac.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+set -o errexit -o nounset
+
+if docker -v; then
+
+ # generate a unique string for CI deployment
+ export KEY=$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'a-z' | head -c 12)
+ export PASSWORD=$KEY$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'A-Z' | head -c 2)$(cat /dev/urandom | env LC_CTYPE=C tr -cd '0-9' | head -c 2)
+
+ /bin/sh ./deploy.ci.sh
+
+else
+ echo "Docker is used to run terraform commands, please install before run: https://docs.docker.com/docker-for-mac/install/"
+fi
diff --git a/examples/azure-vm-simple-linux-managed-disk/main.tf b/examples/azure-vm-simple-linux-managed-disk/main.tf
new file mode 100644
index 000000000..ee44db013
--- /dev/null
+++ b/examples/azure-vm-simple-linux-managed-disk/main.tf
@@ -0,0 +1,108 @@
+# provider "azurerm" {
+# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
+# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
+# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
+# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
+# }
+
+resource "azurerm_resource_group" "rg" {
+ name = "${var.resource_group}"
+ location = "${var.location}"
+}
+
+resource "azurerm_virtual_network" "vnet" {
+ name = "${var.virtual_network_name}"
+ location = "${var.location}"
+ address_space = ["${var.address_space}"]
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+}
+
+resource "azurerm_subnet" "subnet" {
+ name = "${var.rg_prefix}subnet"
+ virtual_network_name = "${azurerm_virtual_network.vnet.name}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ address_prefix = "${var.subnet_prefix}"
+}
+
+resource "azurerm_network_interface" "nic" {
+ name = "${var.rg_prefix}nic"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+
+ ip_configuration {
+ name = "${var.rg_prefix}ipconfig"
+ subnet_id = "${azurerm_subnet.subnet.id}"
+ private_ip_address_allocation = "Dynamic"
+ public_ip_address_id = "${azurerm_public_ip.pip.id}"
+ }
+}
+
+resource "azurerm_public_ip" "pip" {
+ name = "${var.rg_prefix}-ip"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ public_ip_address_allocation = "Dynamic"
+ domain_name_label = "${var.dns_name}"
+}
+
+resource "azurerm_storage_account" "stor" {
+ name = "${var.dns_name}stor"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ account_type = "${var.storage_account_type}"
+}
+
+resource "azurerm_managed_disk" "datadisk" {
+ name = "${var.hostname}-datadisk"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ storage_account_type = "Standard_LRS"
+ create_option = "Empty"
+ disk_size_gb = "1023"
+}
+
+resource "azurerm_virtual_machine" "vm" {
+ name = "${var.rg_prefix}vm"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ vm_size = "${var.vm_size}"
+ network_interface_ids = ["${azurerm_network_interface.nic.id}"]
+
+ storage_image_reference {
+ publisher = "${var.image_publisher}"
+ offer = "${var.image_offer}"
+ sku = "${var.image_sku}"
+ version = "${var.image_version}"
+ }
+
+ storage_os_disk {
+ name = "${var.hostname}-osdisk"
+ managed_disk_type = "Standard_LRS"
+ caching = "ReadWrite"
+ create_option = "FromImage"
+ }
+
+ storage_data_disk {
+ name = "${var.hostname}-datadisk"
+ managed_disk_id = "${azurerm_managed_disk.datadisk.id}"
+ managed_disk_type = "Standard_LRS"
+ disk_size_gb = "1023"
+ create_option = "Attach"
+ lun = 0
+ }
+
+ os_profile {
+ computer_name = "${var.hostname}"
+ admin_username = "${var.admin_username}"
+ admin_password = "${var.admin_password}"
+ }
+
+ os_profile_linux_config {
+ disable_password_authentication = false
+ }
+
+ boot_diagnostics {
+ enabled = true
+ storage_uri = "${azurerm_storage_account.stor.primary_blob_endpoint}"
+ }
+}
diff --git a/examples/azure-vm-simple-linux-managed-disk/outputs.tf b/examples/azure-vm-simple-linux-managed-disk/outputs.tf
new file mode 100644
index 000000000..13768e554
--- /dev/null
+++ b/examples/azure-vm-simple-linux-managed-disk/outputs.tf
@@ -0,0 +1,11 @@
+output "hostname" {
+ value = "${var.hostname}"
+}
+
+output "vm_fqdn" {
+ value = "${azurerm_public_ip.pip.fqdn}"
+}
+
+output "ssh_command" {
+ value = "ssh ${var.admin_username}@${azurerm_public_ip.pip.fqdn}"
+}
diff --git a/examples/azure-vm-simple-linux-managed-disk/variables.tf b/examples/azure-vm-simple-linux-managed-disk/variables.tf
new file mode 100644
index 000000000..17fbe337d
--- /dev/null
+++ b/examples/azure-vm-simple-linux-managed-disk/variables.tf
@@ -0,0 +1,75 @@
+variable "resource_group" {
+ description = "The name of the resource group in which to create the virtual network."
+}
+
+variable "rg_prefix" {
+ description = "The shortened abbreviation to represent your resource group that will go on the front of some resources."
+ default = "rg"
+}
+
+variable "hostname" {
+ description = "VM name referenced also in storage-related names."
+}
+
+variable "dns_name" {
+ description = " Label for the Domain Name. Will be used to make up the FQDN. If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS system."
+}
+
+variable "location" {
+ description = "The location/region where the virtual network is created. Changing this forces a new resource to be created."
+ default = "southcentralus"
+}
+
+variable "virtual_network_name" {
+ description = "The name for the virtual network."
+ default = "vnet"
+}
+
+variable "address_space" {
+ description = "The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created."
+ default = "10.0.0.0/16"
+}
+
+variable "subnet_prefix" {
+ description = "The address prefix to use for the subnet."
+ default = "10.0.10.0/24"
+}
+
+variable "storage_account_type" {
+ description = "Defines the type of storage account to be created. Valid options are Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS. Changing this is sometimes valid - see the Azure documentation for more information on which types of accounts can be converted into other types."
+ default = "Standard_LRS"
+}
+
+variable "vm_size" {
+ description = "Specifies the size of the virtual machine."
+ default = "Standard_A0"
+}
+
+variable "image_publisher" {
+ description = "name of the publisher of the image (az vm image list)"
+ default = "Canonical"
+}
+
+variable "image_offer" {
+ description = "the name of the offer (az vm image list)"
+ default = "UbuntuServer"
+}
+
+variable "image_sku" {
+ description = "image sku to apply (az vm image list)"
+ default = "16.04-LTS"
+}
+
+variable "image_version" {
+ description = "version of the image to apply (az vm image list)"
+ default = "latest"
+}
+
+variable "admin_username" {
+ description = "administrator user name"
+ default = "vmadmin"
+}
+
+variable "admin_password" {
+ description = "administrator password (recommended to disable password auth)"
+}
diff --git a/terraform/context_plan_test.go b/terraform/context_plan_test.go
index 3e8c54190..3851e406b 100644
--- a/terraform/context_plan_test.go
+++ b/terraform/context_plan_test.go
@@ -3154,3 +3154,146 @@ func TestContext2Plan_ignoreChangesWithFlatmaps(t *testing.T) {
t.Fatalf("bad:\n%s\n\nexpected\n\n%s", actual, expected)
}
}
+
+// TestContext2Plan_resourceNestedCount ensures that resource sets which depend
+// on the count of another resource set (i.e. a count derived from another
+// set's instance count via an expression like foo.*.id) get properly
+// normalized to the indexes they should have. This case comes up when there is
+// an existing state (after an initial apply).
+func TestContext2Plan_resourceNestedCount(t *testing.T) {
+ m := testModule(t, "nested-resource-count-plan")
+ p := testProvider("aws")
+ p.DiffFn = testDiffFn
+ p.RefreshFn = func(i *InstanceInfo, is *InstanceState) (*InstanceState, error) {
+ return is, nil
+ }
+ s := &State{
+ Modules: []*ModuleState{
+ &ModuleState{
+ Path: rootModulePath,
+ Resources: map[string]*ResourceState{
+ "aws_instance.foo.0": &ResourceState{
+ Type: "aws_instance",
+ Primary: &InstanceState{
+ ID: "foo0",
+ Attributes: map[string]string{
+ "id": "foo0",
+ },
+ },
+ },
+ "aws_instance.foo.1": &ResourceState{
+ Type: "aws_instance",
+ Primary: &InstanceState{
+ ID: "foo1",
+ Attributes: map[string]string{
+ "id": "foo1",
+ },
+ },
+ },
+ "aws_instance.bar.0": &ResourceState{
+ Type: "aws_instance",
+ Dependencies: []string{"aws_instance.foo.*"},
+ Primary: &InstanceState{
+ ID: "bar0",
+ Attributes: map[string]string{
+ "id": "bar0",
+ },
+ },
+ },
+ "aws_instance.bar.1": &ResourceState{
+ Type: "aws_instance",
+ Dependencies: []string{"aws_instance.foo.*"},
+ Primary: &InstanceState{
+ ID: "bar1",
+ Attributes: map[string]string{
+ "id": "bar1",
+ },
+ },
+ },
+ "aws_instance.baz.0": &ResourceState{
+ Type: "aws_instance",
+ Dependencies: []string{"aws_instance.bar.*"},
+ Primary: &InstanceState{
+ ID: "baz0",
+ Attributes: map[string]string{
+ "id": "baz0",
+ },
+ },
+ },
+ "aws_instance.baz.1": &ResourceState{
+ Type: "aws_instance",
+ Dependencies: []string{"aws_instance.bar.*"},
+ Primary: &InstanceState{
+ ID: "baz1",
+ Attributes: map[string]string{
+ "id": "baz1",
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ ctx := testContext2(t, &ContextOpts{
+ Module: m,
+ Providers: map[string]ResourceProviderFactory{
+ "aws": testProviderFuncFixed(p),
+ },
+ State: s,
+ })
+
+ w, e := ctx.Validate()
+ if len(w) > 0 {
+ t.Fatalf("warnings generated on validate: %#v", w)
+ }
+ if len(e) > 0 {
+ t.Fatalf("errors generated on validate: %#v", e)
+ }
+
+ _, err := ctx.Refresh()
+ if err != nil {
+ t.Fatalf("refresh err: %s", err)
+ }
+
+ plan, err := ctx.Plan()
+ if err != nil {
+ t.Fatalf("plan err: %s", err)
+ }
+
+ actual := strings.TrimSpace(plan.String())
+ expected := strings.TrimSpace(`
+DIFF:
+
+
+
+STATE:
+
+aws_instance.bar.0:
+ ID = bar0
+
+ Dependencies:
+ aws_instance.foo.*
+aws_instance.bar.1:
+ ID = bar1
+
+ Dependencies:
+ aws_instance.foo.*
+aws_instance.baz.0:
+ ID = baz0
+
+ Dependencies:
+ aws_instance.bar.*
+aws_instance.baz.1:
+ ID = baz1
+
+ Dependencies:
+ aws_instance.bar.*
+aws_instance.foo.0:
+ ID = foo0
+aws_instance.foo.1:
+ ID = foo1
+`)
+ if actual != expected {
+ t.Fatalf("bad:\n%s\n\nexpected\n\n%s", actual, expected)
+ }
+}
diff --git a/terraform/graph_builder_plan.go b/terraform/graph_builder_plan.go
index 02d869700..a6a3a90d4 100644
--- a/terraform/graph_builder_plan.go
+++ b/terraform/graph_builder_plan.go
@@ -113,6 +113,9 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer {
// have to connect again later for providers and so on.
&ReferenceTransformer{},
+ // Add the node to fix the state count boundaries
+ &CountBoundaryTransformer{},
+
// Target
&TargetsTransformer{Targets: b.Targets},
diff --git a/terraform/graph_builder_plan_test.go b/terraform/graph_builder_plan_test.go
index 23526a9ac..25578ebaf 100644
--- a/terraform/graph_builder_plan_test.go
+++ b/terraform/graph_builder_plan_test.go
@@ -29,7 +29,7 @@ func TestPlanGraphBuilder(t *testing.T) {
actual := strings.TrimSpace(g.String())
expected := strings.TrimSpace(testPlanGraphBuilderStr)
if actual != expected {
- t.Fatalf("bad: %s", actual)
+ t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual)
}
}
@@ -61,6 +61,14 @@ aws_load_balancer.weblb
provider.aws
aws_security_group.firewall
provider.aws
+meta.count-boundary (count boundary fixup)
+ aws_instance.web
+ aws_load_balancer.weblb
+ aws_security_group.firewall
+ openstack_floating_ip.random
+ provider.aws
+ provider.openstack
+ var.foo
openstack_floating_ip.random
provider.openstack
provider.aws
@@ -75,6 +83,7 @@ provider.openstack (close)
openstack_floating_ip.random
provider.openstack
root
+ meta.count-boundary (count boundary fixup)
provider.aws (close)
provider.openstack (close)
var.foo
diff --git a/terraform/test-fixtures/nested-resource-count-plan/main.tf b/terraform/test-fixtures/nested-resource-count-plan/main.tf
new file mode 100644
index 000000000..f803fd1f6
--- /dev/null
+++ b/terraform/test-fixtures/nested-resource-count-plan/main.tf
@@ -0,0 +1,11 @@
+resource "aws_instance" "foo" {
+ count = 2
+}
+
+resource "aws_instance" "bar" {
+ count = "${length(aws_instance.foo.*.id)}"
+}
+
+resource "aws_instance" "baz" {
+ count = "${length(aws_instance.bar.*.id)}"
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/client.go
new file mode 100644
index 000000000..916cd5043
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/client.go
@@ -0,0 +1,59 @@
+// Package sql implements the Azure ARM Sql service API version 2014-04-01.
+//
+// Provides create, read, update and delete functionality for Azure SQL
+// resources including servers, databases, elastic pools, recommendations,
+// operations, and usage metrics.
+package sql
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+)
+
+const (
+ // APIVersion is the version of the Sql
+ APIVersion = "2014-04-01"
+
+ // DefaultBaseURI is the default URI used for the service Sql
+ DefaultBaseURI = "https://management.azure.com"
+)
+
+// ManagementClient is the base client for Sql.
+type ManagementClient struct {
+ autorest.Client
+ BaseURI string
+ APIVersion string
+ SubscriptionID string
+}
+
+// New creates an instance of the ManagementClient client.
+func New(subscriptionID string) ManagementClient {
+ return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the ManagementClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient {
+ return ManagementClient{
+ Client: autorest.NewClientWithUserAgent(UserAgent()),
+ BaseURI: baseURI,
+ APIVersion: APIVersion,
+ SubscriptionID: subscriptionID,
+ }
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/databases.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/databases.go
new file mode 100644
index 000000000..70a13d619
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/databases.go
@@ -0,0 +1,934 @@
+package sql
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "net/http"
+)
+
+// DatabasesClient is the provides create, read, update and delete
+// functionality for Azure SQL resources including servers, databases,
+// elastic pools, recommendations, operations, and usage metrics.
+type DatabasesClient struct {
+ ManagementClient
+}
+
+// NewDatabasesClient creates an instance of the DatabasesClient client.
+func NewDatabasesClient(subscriptionID string) DatabasesClient {
+ return NewDatabasesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewDatabasesClientWithBaseURI creates an instance of the DatabasesClient
+// client.
+func NewDatabasesClientWithBaseURI(baseURI string, subscriptionID string) DatabasesClient {
+ return DatabasesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates a new Azure SQL database or updates an existing
+// Azure SQL database. Location is a required property in the request body,
+// and it must be the same as the location of the SQL server. This method may
+// poll for completion. Polling can be canceled by passing the cancel channel
+// argument. The channel will be used to cancel polling and any outstanding
+// HTTP requests.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server. databaseName
+// is the name of the Azure SQL database to be operated on (updated or
+// created). parameters is the required parameters for creating or updating a
+// database.
+func (client DatabasesClient) CreateOrUpdate(resourceGroupName string, serverName string, databaseName string, parameters Database, cancel <-chan struct{}) (result autorest.Response, err error) {
+ req, err := client.CreateOrUpdatePreparer(resourceGroupName, serverName, databaseName, parameters, cancel)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "CreateOrUpdate", nil, "Failure preparing request")
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = resp
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "CreateOrUpdate", resp, "Failure sending request")
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.DatabasesClient", "CreateOrUpdate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client DatabasesClient) CreateOrUpdatePreparer(resourceGroupName string, serverName string, databaseName string, parameters Database, cancel <-chan struct{}) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "databaseName": autorest.Encode("path", databaseName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsJSON(),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabasesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client,
+ req,
+ azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client DatabasesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// CreateOrUpdateTransparentDataEncryptionConfiguration creates or updates an
+// Azure SQL Database Transparent Data Encryption Operation.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server. databaseName
+// is the name of the Azure SQL database for which setting the Transparent
+// Data Encryption applies. parameters is the required parameters for
+// creating or updating transparent data encryption.
+func (client DatabasesClient) CreateOrUpdateTransparentDataEncryptionConfiguration(resourceGroupName string, serverName string, databaseName string, parameters TransparentDataEncryption) (result TransparentDataEncryption, err error) {
+ req, err := client.CreateOrUpdateTransparentDataEncryptionConfigurationPreparer(resourceGroupName, serverName, databaseName, parameters)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "CreateOrUpdateTransparentDataEncryptionConfiguration", nil, "Failure preparing request")
+ }
+
+ resp, err := client.CreateOrUpdateTransparentDataEncryptionConfigurationSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "CreateOrUpdateTransparentDataEncryptionConfiguration", resp, "Failure sending request")
+ }
+
+ result, err = client.CreateOrUpdateTransparentDataEncryptionConfigurationResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.DatabasesClient", "CreateOrUpdateTransparentDataEncryptionConfiguration", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdateTransparentDataEncryptionConfigurationPreparer prepares the CreateOrUpdateTransparentDataEncryptionConfiguration request.
+func (client DatabasesClient) CreateOrUpdateTransparentDataEncryptionConfigurationPreparer(resourceGroupName string, serverName string, databaseName string, parameters TransparentDataEncryption) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "databaseName": autorest.Encode("path", databaseName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsJSON(),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/transparentDataEncryption/current", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// CreateOrUpdateTransparentDataEncryptionConfigurationSender sends the CreateOrUpdateTransparentDataEncryptionConfiguration request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabasesClient) CreateOrUpdateTransparentDataEncryptionConfigurationSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// CreateOrUpdateTransparentDataEncryptionConfigurationResponder handles the response to the CreateOrUpdateTransparentDataEncryptionConfiguration request. The method always
+// closes the http.Response Body.
+func (client DatabasesClient) CreateOrUpdateTransparentDataEncryptionConfigurationResponder(resp *http.Response) (result TransparentDataEncryption, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
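+// A hypothetical usage sketch, not part of the generated file: resource
+// names below are placeholders, and the NewDatabasesClient constructor is
+// assumed from earlier in this file. The parameters value would normally
+// carry the desired encryption status, whose fields are defined in this
+// package's models.
+//
+//    client := sql.NewDatabasesClient("<subscription-id>")
+//    tde := sql.TransparentDataEncryption{} // populate as required
+//    _, err := client.CreateOrUpdateTransparentDataEncryptionConfiguration(
+//        "myResourceGroup", "myServer", "myDatabase", tde)
+//    if err != nil {
+//        // handle the error
+//    }
+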
+// Delete deletes an Azure SQL database.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server. databaseName
+// is the name of the Azure SQL database to be deleted.
+func (client DatabasesClient) Delete(resourceGroupName string, serverName string, databaseName string) (result autorest.Response, err error) {
+ req, err := client.DeletePreparer(resourceGroupName, serverName, databaseName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "Delete", nil, "Failure preparing request")
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "Delete", resp, "Failure sending request")
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.DatabasesClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client DatabasesClient) DeletePreparer(resourceGroupName string, serverName string, databaseName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "databaseName": autorest.Encode("path", databaseName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabasesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client DatabasesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
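+// A hypothetical usage sketch (names are placeholders): Delete returns a
+// bare autorest.Response, which embeds *http.Response, so the status code
+// can be inspected directly.
+//
+//    client := sql.NewDatabasesClient("<subscription-id>")
+//    resp, err := client.Delete("myResourceGroup", "myServer", "myDatabase")
+//    if err == nil && resp.Response != nil {
+//        _ = resp.StatusCode // 200 or 204 on success, per the responder
+//    }
+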
+// Get gets information about an Azure SQL database.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server. databaseName
+// is the name of the Azure SQL database to be retrieved. expand is a
+// comma-separated list of child objects to expand in the response. Possible
+// properties: serviceTierAdvisors, upgradeHint, transparentDataEncryption.
+func (client DatabasesClient) Get(resourceGroupName string, serverName string, databaseName string, expand string) (result Database, err error) {
+ req, err := client.GetPreparer(resourceGroupName, serverName, databaseName, expand)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "Get", nil, "Failure preparing request")
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "Get", resp, "Failure sending request")
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.DatabasesClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client DatabasesClient) GetPreparer(resourceGroupName string, serverName string, databaseName string, expand string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "databaseName": autorest.Encode("path", databaseName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+ if len(expand) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabasesClient) GetSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client DatabasesClient) GetResponder(resp *http.Response) (result Database, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
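+// A hypothetical usage sketch (names are placeholders): fetching a database
+// and expanding child objects via the optional expand argument, which maps
+// to the $expand query parameter above.
+//
+//    client := sql.NewDatabasesClient("<subscription-id>")
+//    db, err := client.Get("myResourceGroup", "myServer", "myDatabase",
+//        "serviceTierAdvisors,transparentDataEncryption")
+//    if err != nil {
+//        // handle the error
+//    }
+//    _ = db
+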
+// GetServiceTierAdvisor gets information about a service tier advisor.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server. databaseName
+// is the name of the database. serviceTierAdvisorName is the name of the
+// service tier advisor.
+func (client DatabasesClient) GetServiceTierAdvisor(resourceGroupName string, serverName string, databaseName string, serviceTierAdvisorName string) (result ServiceTierAdvisor, err error) {
+ req, err := client.GetServiceTierAdvisorPreparer(resourceGroupName, serverName, databaseName, serviceTierAdvisorName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "GetServiceTierAdvisor", nil, "Failure preparing request")
+ }
+
+ resp, err := client.GetServiceTierAdvisorSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "GetServiceTierAdvisor", resp, "Failure sending request")
+ }
+
+ result, err = client.GetServiceTierAdvisorResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.DatabasesClient", "GetServiceTierAdvisor", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetServiceTierAdvisorPreparer prepares the GetServiceTierAdvisor request.
+func (client DatabasesClient) GetServiceTierAdvisorPreparer(resourceGroupName string, serverName string, databaseName string, serviceTierAdvisorName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "databaseName": autorest.Encode("path", databaseName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "serviceTierAdvisorName": autorest.Encode("path", serviceTierAdvisorName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/serviceTierAdvisors/{serviceTierAdvisorName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// GetServiceTierAdvisorSender sends the GetServiceTierAdvisor request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabasesClient) GetServiceTierAdvisorSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// GetServiceTierAdvisorResponder handles the response to the GetServiceTierAdvisor request. The method always
+// closes the http.Response Body.
+func (client DatabasesClient) GetServiceTierAdvisorResponder(resp *http.Response) (result ServiceTierAdvisor, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetTransparentDataEncryptionConfiguration gets the Transparent Data
+// Encryption configuration for an Azure SQL database.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server. databaseName
+// is the name of the Azure SQL database whose Transparent Data Encryption
+// configuration is retrieved.
+func (client DatabasesClient) GetTransparentDataEncryptionConfiguration(resourceGroupName string, serverName string, databaseName string) (result TransparentDataEncryption, err error) {
+ req, err := client.GetTransparentDataEncryptionConfigurationPreparer(resourceGroupName, serverName, databaseName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "GetTransparentDataEncryptionConfiguration", nil, "Failure preparing request")
+ }
+
+ resp, err := client.GetTransparentDataEncryptionConfigurationSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "GetTransparentDataEncryptionConfiguration", resp, "Failure sending request")
+ }
+
+ result, err = client.GetTransparentDataEncryptionConfigurationResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.DatabasesClient", "GetTransparentDataEncryptionConfiguration", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetTransparentDataEncryptionConfigurationPreparer prepares the GetTransparentDataEncryptionConfiguration request.
+func (client DatabasesClient) GetTransparentDataEncryptionConfigurationPreparer(resourceGroupName string, serverName string, databaseName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "databaseName": autorest.Encode("path", databaseName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/transparentDataEncryption/current", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// GetTransparentDataEncryptionConfigurationSender sends the GetTransparentDataEncryptionConfiguration request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabasesClient) GetTransparentDataEncryptionConfigurationSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// GetTransparentDataEncryptionConfigurationResponder handles the response to the GetTransparentDataEncryptionConfiguration request. The method always
+// closes the http.Response Body.
+func (client DatabasesClient) GetTransparentDataEncryptionConfigurationResponder(resp *http.Response) (result TransparentDataEncryption, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByServer returns information about a server's Azure SQL databases.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server.
+func (client DatabasesClient) ListByServer(resourceGroupName string, serverName string) (result DatabaseListResult, err error) {
+ req, err := client.ListByServerPreparer(resourceGroupName, serverName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "ListByServer", nil, "Failure preparing request")
+ }
+
+ resp, err := client.ListByServerSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "ListByServer", resp, "Failure sending request")
+ }
+
+ result, err = client.ListByServerResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.DatabasesClient", "ListByServer", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByServerPreparer prepares the ListByServer request.
+func (client DatabasesClient) ListByServerPreparer(resourceGroupName string, serverName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListByServerSender sends the ListByServer request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabasesClient) ListByServerSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListByServerResponder handles the response to the ListByServer request. The method always
+// closes the http.Response Body.
+func (client DatabasesClient) ListByServerResponder(resp *http.Response) (result DatabaseListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
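+// A hypothetical usage sketch. It assumes DatabaseListResult exposes its
+// items through a Value field, per the usual AutoRest list-result
+// convention; check the generated models in this package to confirm.
+//
+//    client := sql.NewDatabasesClient("<subscription-id>")
+//    list, err := client.ListByServer("myResourceGroup", "myServer")
+//    if err == nil && list.Value != nil {
+//        for _, db := range *list.Value {
+//            _ = db // inspect each database
+//        }
+//    }
+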
+// ListRestorePoints returns a list of Azure SQL database restore points.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server. databaseName
+// is the name of the Azure SQL database from which to retrieve available
+// restore points.
+func (client DatabasesClient) ListRestorePoints(resourceGroupName string, serverName string, databaseName string) (result RestorePointListResult, err error) {
+ req, err := client.ListRestorePointsPreparer(resourceGroupName, serverName, databaseName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "ListRestorePoints", nil, "Failure preparing request")
+ }
+
+ resp, err := client.ListRestorePointsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "ListRestorePoints", resp, "Failure sending request")
+ }
+
+ result, err = client.ListRestorePointsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.DatabasesClient", "ListRestorePoints", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListRestorePointsPreparer prepares the ListRestorePoints request.
+func (client DatabasesClient) ListRestorePointsPreparer(resourceGroupName string, serverName string, databaseName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "databaseName": autorest.Encode("path", databaseName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/restorePoints", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListRestorePointsSender sends the ListRestorePoints request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabasesClient) ListRestorePointsSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListRestorePointsResponder handles the response to the ListRestorePoints request. The method always
+// closes the http.Response Body.
+func (client DatabasesClient) ListRestorePointsResponder(resp *http.Response) (result RestorePointListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListServiceTierAdvisors returns information about service tier advisors
+// for the specified database.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server. databaseName
+// is the name of the database.
+func (client DatabasesClient) ListServiceTierAdvisors(resourceGroupName string, serverName string, databaseName string) (result ServiceTierAdvisorListResult, err error) {
+ req, err := client.ListServiceTierAdvisorsPreparer(resourceGroupName, serverName, databaseName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "ListServiceTierAdvisors", nil, "Failure preparing request")
+ }
+
+ resp, err := client.ListServiceTierAdvisorsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "ListServiceTierAdvisors", resp, "Failure sending request")
+ }
+
+ result, err = client.ListServiceTierAdvisorsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.DatabasesClient", "ListServiceTierAdvisors", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListServiceTierAdvisorsPreparer prepares the ListServiceTierAdvisors request.
+func (client DatabasesClient) ListServiceTierAdvisorsPreparer(resourceGroupName string, serverName string, databaseName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "databaseName": autorest.Encode("path", databaseName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/serviceTierAdvisors", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListServiceTierAdvisorsSender sends the ListServiceTierAdvisors request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabasesClient) ListServiceTierAdvisorsSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListServiceTierAdvisorsResponder handles the response to the ListServiceTierAdvisors request. The method always
+// closes the http.Response Body.
+func (client DatabasesClient) ListServiceTierAdvisorsResponder(resp *http.Response) (result ServiceTierAdvisorListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListTransparentDataEncryptionActivity returns the Transparent Data
+// Encryption activity for an Azure SQL database.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server. databaseName
+// is the name of the Azure SQL database whose Transparent Data
+// Encryption activity is listed.
+func (client DatabasesClient) ListTransparentDataEncryptionActivity(resourceGroupName string, serverName string, databaseName string) (result TransparentDataEncryptionActivityListResult, err error) {
+ req, err := client.ListTransparentDataEncryptionActivityPreparer(resourceGroupName, serverName, databaseName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "ListTransparentDataEncryptionActivity", nil, "Failure preparing request")
+ }
+
+ resp, err := client.ListTransparentDataEncryptionActivitySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "ListTransparentDataEncryptionActivity", resp, "Failure sending request")
+ }
+
+ result, err = client.ListTransparentDataEncryptionActivityResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.DatabasesClient", "ListTransparentDataEncryptionActivity", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListTransparentDataEncryptionActivityPreparer prepares the ListTransparentDataEncryptionActivity request.
+func (client DatabasesClient) ListTransparentDataEncryptionActivityPreparer(resourceGroupName string, serverName string, databaseName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "databaseName": autorest.Encode("path", databaseName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/transparentDataEncryption/current/operationResults", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListTransparentDataEncryptionActivitySender sends the ListTransparentDataEncryptionActivity request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabasesClient) ListTransparentDataEncryptionActivitySender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListTransparentDataEncryptionActivityResponder handles the response to the ListTransparentDataEncryptionActivity request. The method always
+// closes the http.Response Body.
+func (client DatabasesClient) ListTransparentDataEncryptionActivityResponder(resp *http.Response) (result TransparentDataEncryptionActivityListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListUsages returns information about Azure SQL database usages.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server. databaseName
+// is the name of the Azure SQL database.
+func (client DatabasesClient) ListUsages(resourceGroupName string, serverName string, databaseName string) (result DatabaseMetricListResult, err error) {
+ req, err := client.ListUsagesPreparer(resourceGroupName, serverName, databaseName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "ListUsages", nil, "Failure preparing request")
+ }
+
+ resp, err := client.ListUsagesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "ListUsages", resp, "Failure sending request")
+ }
+
+ result, err = client.ListUsagesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.DatabasesClient", "ListUsages", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListUsagesPreparer prepares the ListUsages request.
+func (client DatabasesClient) ListUsagesPreparer(resourceGroupName string, serverName string, databaseName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "databaseName": autorest.Encode("path", databaseName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/usages", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListUsagesSender sends the ListUsages request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabasesClient) ListUsagesSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListUsagesResponder handles the response to the ListUsages request. The method always
+// closes the http.Response Body.
+func (client DatabasesClient) ListUsagesResponder(resp *http.Response) (result DatabaseMetricListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// PauseDataWarehouse pauses an Azure SQL Data Warehouse database. This method
+// may poll for completion. Polling can be canceled by passing the cancel
+// channel argument. The channel will be used to cancel polling and any
+// outstanding HTTP requests.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server. databaseName
+// is the name of the Azure SQL Data Warehouse database to pause.
+func (client DatabasesClient) PauseDataWarehouse(resourceGroupName string, serverName string, databaseName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+ req, err := client.PauseDataWarehousePreparer(resourceGroupName, serverName, databaseName, cancel)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "PauseDataWarehouse", nil, "Failure preparing request")
+ }
+
+ resp, err := client.PauseDataWarehouseSender(req)
+ if err != nil {
+ result.Response = resp
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "PauseDataWarehouse", resp, "Failure sending request")
+ }
+
+ result, err = client.PauseDataWarehouseResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.DatabasesClient", "PauseDataWarehouse", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// PauseDataWarehousePreparer prepares the PauseDataWarehouse request.
+func (client DatabasesClient) PauseDataWarehousePreparer(resourceGroupName string, serverName string, databaseName string, cancel <-chan struct{}) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "databaseName": autorest.Encode("path", databaseName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/pause", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// PauseDataWarehouseSender sends the PauseDataWarehouse request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabasesClient) PauseDataWarehouseSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client,
+ req,
+ azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// PauseDataWarehouseResponder handles the response to the PauseDataWarehouse request. The method always
+// closes the http.Response Body.
+func (client DatabasesClient) PauseDataWarehouseResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
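+// A hypothetical usage sketch (names are placeholders): pausing a Data
+// Warehouse while keeping the ability to abort the long-running poll by
+// closing the cancel channel from another goroutine.
+//
+//    client := sql.NewDatabasesClient("<subscription-id>")
+//    cancel := make(chan struct{})
+//    // close(cancel) elsewhere to stop polling and in-flight requests
+//    _, err := client.PauseDataWarehouse("myResourceGroup", "myServer",
+//        "myWarehouse", cancel)
+//    if err != nil {
+//        // handle the error
+//    }
+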
+// ResumeDataWarehouse resumes an Azure SQL Data Warehouse database. This
+// method may poll for completion. Polling can be canceled by passing the
+// cancel channel argument. The channel will be used to cancel polling and
+// any outstanding HTTP requests.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server. databaseName
+// is the name of the Azure SQL Data Warehouse database to resume.
+func (client DatabasesClient) ResumeDataWarehouse(resourceGroupName string, serverName string, databaseName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+ req, err := client.ResumeDataWarehousePreparer(resourceGroupName, serverName, databaseName, cancel)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "ResumeDataWarehouse", nil, "Failure preparing request")
+ }
+
+ resp, err := client.ResumeDataWarehouseSender(req)
+ if err != nil {
+ result.Response = resp
+ return result, autorest.NewErrorWithError(err, "sql.DatabasesClient", "ResumeDataWarehouse", resp, "Failure sending request")
+ }
+
+ result, err = client.ResumeDataWarehouseResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.DatabasesClient", "ResumeDataWarehouse", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ResumeDataWarehousePreparer prepares the ResumeDataWarehouse request.
+func (client DatabasesClient) ResumeDataWarehousePreparer(resourceGroupName string, serverName string, databaseName string, cancel <-chan struct{}) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "databaseName": autorest.Encode("path", databaseName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/resume", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// ResumeDataWarehouseSender sends the ResumeDataWarehouse request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabasesClient) ResumeDataWarehouseSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client,
+ req,
+ azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// ResumeDataWarehouseResponder handles the response to the ResumeDataWarehouse request. The method always
+// closes the http.Response Body.
+func (client DatabasesClient) ResumeDataWarehouseResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/elasticpools.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/elasticpools.go
new file mode 100644
index 000000000..e68d6674a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/elasticpools.go
@@ -0,0 +1,582 @@
+package sql
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "net/http"
+)
+
+// ElasticPoolsClient provides create, read, update, and delete
+// functionality for Azure SQL resources including servers, databases,
+// elastic pools, recommendations, operations, and usage metrics.
+type ElasticPoolsClient struct {
+ ManagementClient
+}
+
+// NewElasticPoolsClient creates an instance of the ElasticPoolsClient client.
+func NewElasticPoolsClient(subscriptionID string) ElasticPoolsClient {
+ return NewElasticPoolsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewElasticPoolsClientWithBaseURI creates an instance of the
+// ElasticPoolsClient client.
+func NewElasticPoolsClientWithBaseURI(baseURI string, subscriptionID string) ElasticPoolsClient {
+ return ElasticPoolsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
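+// A hypothetical construction sketch: the embedded client is assumed to
+// expose an Authorizer field for credentials, per the go-autorest
+// convention; obtaining the authorizer itself is elided here.
+//
+//    client := sql.NewElasticPoolsClient("<subscription-id>")
+//    client.Authorizer = myAuthorizer // e.g. a service principal token
+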
+// CreateOrUpdate creates a new Azure SQL elastic pool or updates an existing
+// Azure SQL elastic pool. This method may poll for completion. Polling can
+// be canceled by passing the cancel channel argument. The channel will be
+// used to cancel polling and any outstanding HTTP requests.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server.
+// elasticPoolName is the name of the Azure SQL Elastic Pool to be operated
+// on (updated or created). parameters is the required parameters for
+// creating or updating an Elastic Pool.
+func (client ElasticPoolsClient) CreateOrUpdate(resourceGroupName string, serverName string, elasticPoolName string, parameters ElasticPool, cancel <-chan struct{}) (result autorest.Response, err error) {
+ req, err := client.CreateOrUpdatePreparer(resourceGroupName, serverName, elasticPoolName, parameters, cancel)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "CreateOrUpdate", nil, "Failure preparing request")
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = resp
+ return result, autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "CreateOrUpdate", resp, "Failure sending request")
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "CreateOrUpdate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client ElasticPoolsClient) CreateOrUpdatePreparer(resourceGroupName string, serverName string, elasticPoolName string, parameters ElasticPool, cancel <-chan struct{}) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "elasticPoolName": autorest.Encode("path", elasticPoolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsJSON(),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/elasticPools/{elasticPoolName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client ElasticPoolsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client,
+ req,
+ azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client ElasticPoolsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
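+// A hypothetical usage sketch: the ElasticPool payload is left empty here
+// because its fields are defined in this package's models; names below are
+// placeholders. Closing cancel aborts the asynchronous polling.
+//
+//    client := sql.NewElasticPoolsClient("<subscription-id>")
+//    pool := sql.ElasticPool{} // set Location and pool properties
+//    cancel := make(chan struct{})
+//    _, err := client.CreateOrUpdate("myResourceGroup", "myServer",
+//        "myPool", pool, cancel)
+//    if err != nil {
+//        // handle the error
+//    }
+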
+// Delete deletes the Azure SQL elastic pool.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server.
+// elasticPoolName is the name of the Azure SQL Elastic Pool to be deleted.
+func (client ElasticPoolsClient) Delete(resourceGroupName string, serverName string, elasticPoolName string) (result autorest.Response, err error) {
+ req, err := client.DeletePreparer(resourceGroupName, serverName, elasticPoolName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "Delete", nil, "Failure preparing request")
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ return result, autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "Delete", resp, "Failure sending request")
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ElasticPoolsClient) DeletePreparer(resourceGroupName string, serverName string, elasticPoolName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "elasticPoolName": autorest.Encode("path", elasticPoolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/elasticPools/{elasticPoolName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ElasticPoolsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ElasticPoolsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets information about an Azure SQL elastic pool.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server.
+// elasticPoolName is the name of the Azure SQL Elastic Pool to be retrieved.
+func (client ElasticPoolsClient) Get(resourceGroupName string, serverName string, elasticPoolName string) (result ElasticPool, err error) {
+ req, err := client.GetPreparer(resourceGroupName, serverName, elasticPoolName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "Get", nil, "Failure preparing request")
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "Get", resp, "Failure sending request")
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ElasticPoolsClient) GetPreparer(resourceGroupName string, serverName string, elasticPoolName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "elasticPoolName": autorest.Encode("path", elasticPoolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/elasticPools/{elasticPoolName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ElasticPoolsClient) GetSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ElasticPoolsClient) GetResponder(resp *http.Response) (result ElasticPool, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetDatabase gets information about an Azure SQL database inside an Azure
+// SQL elastic pool.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server.
+// elasticPoolName is the name of the Azure SQL Elastic Pool that contains
+// the database.
+// databaseName is the name of the Azure SQL database to be retrieved.
+func (client ElasticPoolsClient) GetDatabase(resourceGroupName string, serverName string, elasticPoolName string, databaseName string) (result Database, err error) {
+ req, err := client.GetDatabasePreparer(resourceGroupName, serverName, elasticPoolName, databaseName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "GetDatabase", nil, "Failure preparing request")
+ }
+
+ resp, err := client.GetDatabaseSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "GetDatabase", resp, "Failure sending request")
+ }
+
+ result, err = client.GetDatabaseResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "GetDatabase", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetDatabasePreparer prepares the GetDatabase request.
+func (client ElasticPoolsClient) GetDatabasePreparer(resourceGroupName string, serverName string, elasticPoolName string, databaseName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "databaseName": autorest.Encode("path", databaseName),
+ "elasticPoolName": autorest.Encode("path", elasticPoolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/elasticPools/{elasticPoolName}/databases/{databaseName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// GetDatabaseSender sends the GetDatabase request. The method will close the
+// http.Response Body if it receives an error.
+func (client ElasticPoolsClient) GetDatabaseSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// GetDatabaseResponder handles the response to the GetDatabase request. The method always
+// closes the http.Response Body.
+func (client ElasticPoolsClient) GetDatabaseResponder(resp *http.Response) (result Database, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListActivity returns information about Azure SQL elastic pool activities.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server.
+// elasticPoolName is the name of the Azure SQL Elastic Pool for which to get
+// the current activity.
+func (client ElasticPoolsClient) ListActivity(resourceGroupName string, serverName string, elasticPoolName string) (result ElasticPoolActivityListResult, err error) {
+ req, err := client.ListActivityPreparer(resourceGroupName, serverName, elasticPoolName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "ListActivity", nil, "Failure preparing request")
+ }
+
+ resp, err := client.ListActivitySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "ListActivity", resp, "Failure sending request")
+ }
+
+ result, err = client.ListActivityResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "ListActivity", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListActivityPreparer prepares the ListActivity request.
+func (client ElasticPoolsClient) ListActivityPreparer(resourceGroupName string, serverName string, elasticPoolName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "elasticPoolName": autorest.Encode("path", elasticPoolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/elasticPools/{elasticPoolName}/elasticPoolActivity", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListActivitySender sends the ListActivity request. The method will close the
+// http.Response Body if it receives an error.
+func (client ElasticPoolsClient) ListActivitySender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListActivityResponder handles the response to the ListActivity request. The method always
+// closes the http.Response Body.
+func (client ElasticPoolsClient) ListActivityResponder(resp *http.Response) (result ElasticPoolActivityListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByServer returns information about Azure SQL elastic pools.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server.
+func (client ElasticPoolsClient) ListByServer(resourceGroupName string, serverName string) (result ElasticPoolListResult, err error) {
+ req, err := client.ListByServerPreparer(resourceGroupName, serverName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "ListByServer", nil, "Failure preparing request")
+ }
+
+ resp, err := client.ListByServerSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "ListByServer", resp, "Failure sending request")
+ }
+
+ result, err = client.ListByServerResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "ListByServer", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByServerPreparer prepares the ListByServer request.
+func (client ElasticPoolsClient) ListByServerPreparer(resourceGroupName string, serverName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/elasticPools", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListByServerSender sends the ListByServer request. The method will close the
+// http.Response Body if it receives an error.
+func (client ElasticPoolsClient) ListByServerSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListByServerResponder handles the response to the ListByServer request. The method always
+// closes the http.Response Body.
+func (client ElasticPoolsClient) ListByServerResponder(resp *http.Response) (result ElasticPoolListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListDatabaseActivity returns information about activity on Azure SQL
+// databases inside an Azure SQL elastic pool.
+//
+// elasticPoolName is the name of the Azure SQL Elastic Pool.
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server.
+func (client ElasticPoolsClient) ListDatabaseActivity(elasticPoolName string, resourceGroupName string, serverName string) (result ElasticPoolDatabaseActivityListResult, err error) {
+ req, err := client.ListDatabaseActivityPreparer(elasticPoolName, resourceGroupName, serverName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "ListDatabaseActivity", nil, "Failure preparing request")
+ }
+
+ resp, err := client.ListDatabaseActivitySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "ListDatabaseActivity", resp, "Failure sending request")
+ }
+
+ result, err = client.ListDatabaseActivityResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "ListDatabaseActivity", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListDatabaseActivityPreparer prepares the ListDatabaseActivity request.
+func (client ElasticPoolsClient) ListDatabaseActivityPreparer(elasticPoolName string, resourceGroupName string, serverName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "elasticPoolName": autorest.Encode("path", elasticPoolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/elasticPools/{elasticPoolName}/elasticPoolDatabaseActivity", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListDatabaseActivitySender sends the ListDatabaseActivity request. The method will close the
+// http.Response Body if it receives an error.
+func (client ElasticPoolsClient) ListDatabaseActivitySender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListDatabaseActivityResponder handles the response to the ListDatabaseActivity request. The method always
+// closes the http.Response Body.
+func (client ElasticPoolsClient) ListDatabaseActivityResponder(resp *http.Response) (result ElasticPoolDatabaseActivityListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListDatabases returns information about the Azure SQL databases inside an
+// Azure SQL elastic pool.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server.
+// elasticPoolName is the name of the Azure SQL Elastic Pool to be retrieved.
+func (client ElasticPoolsClient) ListDatabases(resourceGroupName string, serverName string, elasticPoolName string) (result DatabaseListResult, err error) {
+ req, err := client.ListDatabasesPreparer(resourceGroupName, serverName, elasticPoolName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "ListDatabases", nil, "Failure preparing request")
+ }
+
+ resp, err := client.ListDatabasesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "ListDatabases", resp, "Failure sending request")
+ }
+
+ result, err = client.ListDatabasesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ElasticPoolsClient", "ListDatabases", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListDatabasesPreparer prepares the ListDatabases request.
+func (client ElasticPoolsClient) ListDatabasesPreparer(resourceGroupName string, serverName string, elasticPoolName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "elasticPoolName": autorest.Encode("path", elasticPoolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/elasticPools/{elasticPoolName}/databases", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListDatabasesSender sends the ListDatabases request. The method will close the
+// http.Response Body if it receives an error.
+func (client ElasticPoolsClient) ListDatabasesSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListDatabasesResponder handles the response to the ListDatabases request. The method always
+// closes the http.Response Body.
+func (client ElasticPoolsClient) ListDatabasesResponder(resp *http.Response) (result DatabaseListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
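+
+// Example (editorial sketch, not part of the generated SDK): listing the
+// elastic pools on a server end to end. The constructor follows the pattern
+// used elsewhere in this package; the credential wiring and all names below
+// are assumptions, not something this file defines:
+//
+//	client := NewElasticPoolsClient("00000000-0000-0000-0000-000000000000")
+//	client.Authorizer = spt // e.g. a service principal token from go-autorest
+//	result, err := client.ListByServer("my-rg", "my-server")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if result.Value != nil {
+//		for _, pool := range *result.Value {
+//			if pool.Name != nil {
+//				fmt.Println(*pool.Name)
+//			}
+//		}
+//	}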
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/models.go
new file mode 100644
index 000000000..497d7defb
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/models.go
@@ -0,0 +1,810 @@
+package sql
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/satori/uuid"
+)
+
+// CreateMode enumerates the values for create mode.
+type CreateMode string
+
+const (
+ // Copy specifies the copy state for create mode.
+ Copy CreateMode = "Copy"
+ // Default specifies the default state for create mode.
+ Default CreateMode = "Default"
+ // NonReadableSecondary specifies the non readable secondary state for
+ // create mode.
+ NonReadableSecondary CreateMode = "NonReadableSecondary"
+ // OnlineSecondary specifies the online secondary state for create mode.
+ OnlineSecondary CreateMode = "OnlineSecondary"
+ // PointInTimeRestore specifies the point in time restore state for create
+ // mode.
+ PointInTimeRestore CreateMode = "PointInTimeRestore"
+ // Recovery specifies the recovery state for create mode.
+ Recovery CreateMode = "Recovery"
+ // Restore specifies the restore state for create mode.
+ Restore CreateMode = "Restore"
+)
+
+// DatabaseEditions enumerates the values for database editions.
+type DatabaseEditions string
+
+const (
+ // Basic specifies the basic state for database editions.
+ Basic DatabaseEditions = "Basic"
+ // Business specifies the business state for database editions.
+ Business DatabaseEditions = "Business"
+ // DataWarehouse specifies the data warehouse state for database editions.
+ DataWarehouse DatabaseEditions = "DataWarehouse"
+ // Free specifies the free state for database editions.
+ Free DatabaseEditions = "Free"
+ // Premium specifies the premium state for database editions.
+ Premium DatabaseEditions = "Premium"
+ // Standard specifies the standard state for database editions.
+ Standard DatabaseEditions = "Standard"
+ // Stretch specifies the stretch state for database editions.
+ Stretch DatabaseEditions = "Stretch"
+ // Web specifies the web state for database editions.
+ Web DatabaseEditions = "Web"
+)
+
+// ElasticPoolEditions enumerates the values for elastic pool editions.
+type ElasticPoolEditions string
+
+const (
+ // ElasticPoolEditionsBasic specifies the elastic pool editions basic
+ // state for elastic pool editions.
+ ElasticPoolEditionsBasic ElasticPoolEditions = "Basic"
+ // ElasticPoolEditionsPremium specifies the elastic pool editions premium
+ // state for elastic pool editions.
+ ElasticPoolEditionsPremium ElasticPoolEditions = "Premium"
+ // ElasticPoolEditionsStandard specifies the elastic pool editions
+ // standard state for elastic pool editions.
+ ElasticPoolEditionsStandard ElasticPoolEditions = "Standard"
+)
+
+// ElasticPoolState enumerates the values for elastic pool state.
+type ElasticPoolState string
+
+const (
+ // Creating specifies the creating state for elastic pool state.
+ Creating ElasticPoolState = "Creating"
+ // Disabled specifies the disabled state for elastic pool state.
+ Disabled ElasticPoolState = "Disabled"
+ // Ready specifies the ready state for elastic pool state.
+ Ready ElasticPoolState = "Ready"
+)
+
+// RecommendedIndexActions enumerates the values for recommended index actions.
+type RecommendedIndexActions string
+
+const (
+ // Create specifies the create state for recommended index actions.
+ Create RecommendedIndexActions = "Create"
+ // Drop specifies the drop state for recommended index actions.
+ Drop RecommendedIndexActions = "Drop"
+ // Rebuild specifies the rebuild state for recommended index actions.
+ Rebuild RecommendedIndexActions = "Rebuild"
+)
+
+// RecommendedIndexStates enumerates the values for recommended index states.
+type RecommendedIndexStates string
+
+const (
+ // Active specifies the active state for recommended index states.
+ Active RecommendedIndexStates = "Active"
+ // Blocked specifies the blocked state for recommended index states.
+ Blocked RecommendedIndexStates = "Blocked"
+ // Executing specifies the executing state for recommended index states.
+ Executing RecommendedIndexStates = "Executing"
+ // Expired specifies the expired state for recommended index states.
+ Expired RecommendedIndexStates = "Expired"
+ // Ignored specifies the ignored state for recommended index states.
+ Ignored RecommendedIndexStates = "Ignored"
+ // Pending specifies the pending state for recommended index states.
+ Pending RecommendedIndexStates = "Pending"
+ // PendingRevert specifies the pending revert state for recommended index
+ // states.
+ PendingRevert RecommendedIndexStates = "Pending Revert"
+ // Reverted specifies the reverted state for recommended index states.
+ Reverted RecommendedIndexStates = "Reverted"
+ // Reverting specifies the reverting state for recommended index states.
+ Reverting RecommendedIndexStates = "Reverting"
+ // Success specifies the success state for recommended index states.
+ Success RecommendedIndexStates = "Success"
+ // Verifying specifies the verifying state for recommended index states.
+ Verifying RecommendedIndexStates = "Verifying"
+)
+
+// RecommendedIndexTypes enumerates the values for recommended index types.
+type RecommendedIndexTypes string
+
+const (
+ // CLUSTERED specifies the clustered state for recommended index types.
+ CLUSTERED RecommendedIndexTypes = "CLUSTERED"
+ // CLUSTEREDCOLUMNSTORE specifies the clusteredcolumnstore state for
+ // recommended index types.
+ CLUSTEREDCOLUMNSTORE RecommendedIndexTypes = "CLUSTERED COLUMNSTORE"
+ // COLUMNSTORE specifies the columnstore state for recommended index types.
+ COLUMNSTORE RecommendedIndexTypes = "COLUMNSTORE"
+ // NONCLUSTERED specifies the nonclustered state for recommended index
+ // types.
+ NONCLUSTERED RecommendedIndexTypes = "NONCLUSTERED"
+)
+
+// RestorePointTypes enumerates the values for restore point types.
+type RestorePointTypes string
+
+const (
+ // CONTINUOUS specifies the continuous state for restore point types.
+ CONTINUOUS RestorePointTypes = "CONTINUOUS"
+ // DISCRETE specifies the discrete state for restore point types.
+ DISCRETE RestorePointTypes = "DISCRETE"
+)
+
+// ServerVersion enumerates the values for server version.
+type ServerVersion string
+
+const (
+	// OneTwoFullStopZero specifies server version "12.0".
+ OneTwoFullStopZero ServerVersion = "12.0"
+	// TwoFullStopZero specifies server version "2.0".
+ TwoFullStopZero ServerVersion = "2.0"
+)
+
+// ServiceObjectiveName enumerates the values for service objective name.
+type ServiceObjectiveName string
+
+const (
+ // ServiceObjectiveNameBasic specifies the service objective name basic
+ // state for service objective name.
+ ServiceObjectiveNameBasic ServiceObjectiveName = "Basic"
+ // ServiceObjectiveNameP1 specifies the service objective name p1 state
+ // for service objective name.
+ ServiceObjectiveNameP1 ServiceObjectiveName = "P1"
+ // ServiceObjectiveNameP2 specifies the service objective name p2 state
+ // for service objective name.
+ ServiceObjectiveNameP2 ServiceObjectiveName = "P2"
+ // ServiceObjectiveNameP3 specifies the service objective name p3 state
+ // for service objective name.
+ ServiceObjectiveNameP3 ServiceObjectiveName = "P3"
+ // ServiceObjectiveNameS0 specifies the service objective name s0 state
+ // for service objective name.
+ ServiceObjectiveNameS0 ServiceObjectiveName = "S0"
+ // ServiceObjectiveNameS1 specifies the service objective name s1 state
+ // for service objective name.
+ ServiceObjectiveNameS1 ServiceObjectiveName = "S1"
+ // ServiceObjectiveNameS2 specifies the service objective name s2 state
+ // for service objective name.
+ ServiceObjectiveNameS2 ServiceObjectiveName = "S2"
+ // ServiceObjectiveNameS3 specifies the service objective name s3 state
+ // for service objective name.
+ ServiceObjectiveNameS3 ServiceObjectiveName = "S3"
+)
+
+// TableType enumerates the values for table type.
+type TableType string
+
+const (
+ // BaseTable specifies the base table state for table type.
+ BaseTable TableType = "BaseTable"
+ // View specifies the view state for table type.
+ View TableType = "View"
+)
+
+// TargetDatabaseEditions enumerates the values for target database editions.
+type TargetDatabaseEditions string
+
+const (
+ // TargetDatabaseEditionsBasic specifies the target database editions
+ // basic state for target database editions.
+ TargetDatabaseEditionsBasic TargetDatabaseEditions = "Basic"
+ // TargetDatabaseEditionsDataWarehouse specifies the target database
+ // editions data warehouse state for target database editions.
+ TargetDatabaseEditionsDataWarehouse TargetDatabaseEditions = "DataWarehouse"
+ // TargetDatabaseEditionsFree specifies the target database editions free
+ // state for target database editions.
+ TargetDatabaseEditionsFree TargetDatabaseEditions = "Free"
+ // TargetDatabaseEditionsPremium specifies the target database editions
+ // premium state for target database editions.
+ TargetDatabaseEditionsPremium TargetDatabaseEditions = "Premium"
+ // TargetDatabaseEditionsStandard specifies the target database editions
+ // standard state for target database editions.
+ TargetDatabaseEditionsStandard TargetDatabaseEditions = "Standard"
+ // TargetDatabaseEditionsStretch specifies the target database editions
+ // stretch state for target database editions.
+ TargetDatabaseEditionsStretch TargetDatabaseEditions = "Stretch"
+)
+
+// TargetElasticPoolEditions enumerates the values for target elastic pool
+// editions.
+type TargetElasticPoolEditions string
+
+const (
+ // TargetElasticPoolEditionsBasic specifies the target elastic pool
+ // editions basic state for target elastic pool editions.
+ TargetElasticPoolEditionsBasic TargetElasticPoolEditions = "Basic"
+ // TargetElasticPoolEditionsPremium specifies the target elastic pool
+ // editions premium state for target elastic pool editions.
+ TargetElasticPoolEditionsPremium TargetElasticPoolEditions = "Premium"
+ // TargetElasticPoolEditionsStandard specifies the target elastic pool
+ // editions standard state for target elastic pool editions.
+ TargetElasticPoolEditionsStandard TargetElasticPoolEditions = "Standard"
+)
+
+// TransparentDataEncryptionActivityStates enumerates the values for
+// transparent data encryption activity states.
+type TransparentDataEncryptionActivityStates string
+
+const (
+ // Decrypting specifies the decrypting state for transparent data
+ // encryption activity states.
+ Decrypting TransparentDataEncryptionActivityStates = "Decrypting"
+ // Encrypting specifies the encrypting state for transparent data
+ // encryption activity states.
+ Encrypting TransparentDataEncryptionActivityStates = "Encrypting"
+)
+
+// TransparentDataEncryptionStates enumerates the values for transparent data
+// encryption states.
+type TransparentDataEncryptionStates string
+
+const (
+ // TransparentDataEncryptionStatesDisabled specifies the transparent data
+ // encryption states disabled state for transparent data encryption
+ // states.
+ TransparentDataEncryptionStatesDisabled TransparentDataEncryptionStates = "Disabled"
+ // TransparentDataEncryptionStatesEnabled specifies the transparent data
+ // encryption states enabled state for transparent data encryption states.
+ TransparentDataEncryptionStatesEnabled TransparentDataEncryptionStates = "Enabled"
+)
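+
+// NOTE (editorial): several enumerations share literal values ("Basic",
+// "Standard", "Premium" and so on), so the generator prefixes the later Go
+// constants with their type name to keep identifiers unique within the
+// package. For example, all of the following encode the string "Basic":
+//
+//	Basic                          // DatabaseEditions
+//	ElasticPoolEditionsBasic       // ElasticPoolEditions
+//	ServiceObjectiveNameBasic      // ServiceObjectiveName
+//	TargetDatabaseEditionsBasic    // TargetDatabaseEditions
+//	TargetElasticPoolEditionsBasic // TargetElasticPoolEditions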
+
+// Column represents an Azure SQL Database table column.
+type Column struct {
+ Name *string `json:"name,omitempty"`
+ ID *string `json:"id,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Tags *map[string]*string `json:"tags,omitempty"`
+ *ColumnProperties `json:"properties,omitempty"`
+}
+
+// ColumnProperties represents the properties of an Azure SQL Database
+// table column.
+type ColumnProperties struct {
+ ColumnType *string `json:"columnType,omitempty"`
+}
+
+// Database represents an Azure SQL Database.
+type Database struct {
+ autorest.Response `json:"-"`
+ Name *string `json:"name,omitempty"`
+ ID *string `json:"id,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Tags *map[string]*string `json:"tags,omitempty"`
+ *DatabaseProperties `json:"properties,omitempty"`
+}
+
+// DatabaseListResult represents the response to a List Azure SQL Database
+// request.
+type DatabaseListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]Database `json:"value,omitempty"`
+}
+
+// DatabaseMetric represents Azure SQL Database metrics.
+type DatabaseMetric struct {
+ ResourceName *string `json:"resourceName,omitempty"`
+ DisplayName *string `json:"displayName,omitempty"`
+ CurrentValue *float64 `json:"currentValue,omitempty"`
+ Limit *float64 `json:"limit,omitempty"`
+ Unit *string `json:"unit,omitempty"`
+ NextResetTime *date.Time `json:"nextResetTime,omitempty"`
+}
+
+// DatabaseMetricListResult represents the response to a List Azure SQL
+// Database metrics request.
+type DatabaseMetricListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]DatabaseMetric `json:"value,omitempty"`
+}
+
+// DatabaseProperties represents the properties of an Azure SQL Database.
+type DatabaseProperties struct {
+ Collation *string `json:"collation,omitempty"`
+ CreationDate *date.Time `json:"creationDate,omitempty"`
+ ContainmentState *int64 `json:"containmentState,omitempty"`
+ CurrentServiceObjectiveID *uuid.UUID `json:"currentServiceObjectiveId,omitempty"`
+ DatabaseID *string `json:"databaseId,omitempty"`
+ EarliestRestoreDate *date.Time `json:"earliestRestoreDate,omitempty"`
+ CreateMode CreateMode `json:"createMode,omitempty"`
+ SourceDatabaseID *string `json:"sourceDatabaseId,omitempty"`
+ Edition DatabaseEditions `json:"edition,omitempty"`
+ MaxSizeBytes *string `json:"maxSizeBytes,omitempty"`
+ RequestedServiceObjectiveID *uuid.UUID `json:"requestedServiceObjectiveId,omitempty"`
+ RequestedServiceObjectiveName ServiceObjectiveName `json:"requestedServiceObjectiveName,omitempty"`
+ ServiceLevelObjective ServiceObjectiveName `json:"serviceLevelObjective,omitempty"`
+ Status *string `json:"status,omitempty"`
+ ElasticPoolName *string `json:"elasticPoolName,omitempty"`
+ DefaultSecondaryLocation *string `json:"defaultSecondaryLocation,omitempty"`
+ ServiceTierAdvisors *[]ServiceTierAdvisor `json:"serviceTierAdvisors,omitempty"`
+ UpgradeHint *UpgradeHint `json:"upgradeHint,omitempty"`
+ Schemas *[]Schema `json:"schemas,omitempty"`
+ TransparentDataEncryption *[]TransparentDataEncryption `json:"transparentDataEncryption,omitempty"`
+ RecommendedIndex *[]RecommendedIndex `json:"recommendedIndex,omitempty"`
+}
+
+// ElasticPool represents an Azure SQL Database elastic pool.
+type ElasticPool struct {
+ autorest.Response `json:"-"`
+ Name *string `json:"name,omitempty"`
+ ID *string `json:"id,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Tags *map[string]*string `json:"tags,omitempty"`
+ *ElasticPoolProperties `json:"properties,omitempty"`
+}
+
+// ElasticPoolActivity represents the activity on an Azure SQL Elastic Pool.
+type ElasticPoolActivity struct {
+ Name *string `json:"name,omitempty"`
+ ID *string `json:"id,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Tags *map[string]*string `json:"tags,omitempty"`
+ *ElasticPoolActivityProperties `json:"properties,omitempty"`
+}
+
+// ElasticPoolActivityListResult represents the response to a List Azure
+// SQL Elastic Pool Activity request.
+type ElasticPoolActivityListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]ElasticPoolActivity `json:"value,omitempty"`
+}
+
+// ElasticPoolActivityProperties represents the properties of an Azure SQL
+// Elastic Pool activity.
+type ElasticPoolActivityProperties struct {
+ EndTime *date.Time `json:"endTime,omitempty"`
+ ErrorCode *int32 `json:"errorCode,omitempty"`
+ ErrorMessage *string `json:"errorMessage,omitempty"`
+ ErrorSeverity *int32 `json:"errorSeverity,omitempty"`
+ Operation *string `json:"operation,omitempty"`
+ OperationID *string `json:"operationId,omitempty"`
+ PercentComplete *int32 `json:"percentComplete,omitempty"`
+ RequestedDatabaseDtuMax *int32 `json:"requestedDatabaseDtuMax,omitempty"`
+ RequestedDatabaseDtuMin *int32 `json:"requestedDatabaseDtuMin,omitempty"`
+ RequestedDtu *int32 `json:"requestedDtu,omitempty"`
+ RequestedElasticPoolName *string `json:"requestedElasticPoolName,omitempty"`
+ RequestedStorageLimitInGB *int64 `json:"requestedStorageLimitInGB,omitempty"`
+ ElasticPoolName *string `json:"elasticPoolName,omitempty"`
+ ServerName *string `json:"serverName,omitempty"`
+ StartTime *date.Time `json:"startTime,omitempty"`
+ State *string `json:"state,omitempty"`
+}
+
+// ElasticPoolDatabaseActivity represents database activity on an Azure SQL
+// Elastic Pool.
+type ElasticPoolDatabaseActivity struct {
+ Name *string `json:"name,omitempty"`
+ ID *string `json:"id,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Tags *map[string]*string `json:"tags,omitempty"`
+ *ElasticPoolDatabaseActivityProperties `json:"properties,omitempty"`
+}
+
+// ElasticPoolDatabaseActivityListResult represents the response to a List
+// Azure SQL Elastic Pool Database Activity request.
+type ElasticPoolDatabaseActivityListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]ElasticPoolDatabaseActivity `json:"value,omitempty"`
+}
+
+// ElasticPoolDatabaseActivityProperties represents the properties of an
+// Azure SQL Elastic Pool Database Activity.
+type ElasticPoolDatabaseActivityProperties struct {
+ DatabaseName *string `json:"databaseName,omitempty"`
+ EndTime *date.Time `json:"endTime,omitempty"`
+ ErrorCode *int32 `json:"errorCode,omitempty"`
+ ErrorMessage *string `json:"errorMessage,omitempty"`
+ ErrorSeverity *int32 `json:"errorSeverity,omitempty"`
+ Operation *string `json:"operation,omitempty"`
+ OperationID *string `json:"operationId,omitempty"`
+ PercentComplete *int32 `json:"percentComplete,omitempty"`
+ RequestedElasticPoolName *string `json:"requestedElasticPoolName,omitempty"`
+ CurrentElasticPoolName *string `json:"currentElasticPoolName,omitempty"`
+ CurrentServiceObjective *string `json:"currentServiceObjective,omitempty"`
+ RequestedServiceObjective *string `json:"requestedServiceObjective,omitempty"`
+ ServerName *string `json:"serverName,omitempty"`
+ StartTime *date.Time `json:"startTime,omitempty"`
+ State *string `json:"state,omitempty"`
+}
+
+// ElasticPoolListResult represents the response to a List Azure SQL
+// Elastic Pool request.
+type ElasticPoolListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]ElasticPool `json:"value,omitempty"`
+}
+
+// ElasticPoolProperties represents the properties of an Azure SQL Elastic
+// Pool.
+type ElasticPoolProperties struct {
+ CreationDate *date.Time `json:"creationDate,omitempty"`
+ State ElasticPoolState `json:"state,omitempty"`
+ Edition ElasticPoolEditions `json:"edition,omitempty"`
+ Dtu *int32 `json:"dtu,omitempty"`
+ DatabaseDtuMax *int32 `json:"databaseDtuMax,omitempty"`
+ DatabaseDtuMin *int32 `json:"databaseDtuMin,omitempty"`
+ StorageMB *int32 `json:"storageMB,omitempty"`
+}
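+
+// Example (editorial sketch, not generated code): optional fields are
+// pointers throughout these models, so building a pool definition means
+// taking addresses of locals. Every name and value here is hypothetical:
+//
+//	location := "West US"
+//	dtu := int32(100)
+//	pool := ElasticPool{
+//		Location: &location,
+//		ElasticPoolProperties: &ElasticPoolProperties{
+//			Edition: ElasticPoolEditionsStandard,
+//			Dtu:     &dtu,
+//		},
+//	}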
+
+// OperationImpact represents the impact of an operation, in both absolute
+// and relative terms.
+type OperationImpact struct {
+ Name *string `json:"name,omitempty"`
+ Unit *string `json:"unit,omitempty"`
+ ChangeValueAbsolute *float64 `json:"changeValueAbsolute,omitempty"`
+ ChangeValueRelative *float64 `json:"changeValueRelative,omitempty"`
+}
+
+// RecommendedDatabaseProperties represents the properties of a recommended
+// Azure SQL Database being upgraded.
+type RecommendedDatabaseProperties struct {
+ Name *string `json:"Name,omitempty"`
+ TargetEdition TargetDatabaseEditions `json:"TargetEdition,omitempty"`
+ TargetServiceLevelObjective *string `json:"TargetServiceLevelObjective,omitempty"`
+}
+
+// RecommendedElasticPool represents an Azure SQL Recommended Elastic Pool.
+type RecommendedElasticPool struct {
+ autorest.Response `json:"-"`
+ Name *string `json:"name,omitempty"`
+ ID *string `json:"id,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Tags *map[string]*string `json:"tags,omitempty"`
+ *RecommendedElasticPoolProperties `json:"properties,omitempty"`
+}
+
+// RecommendedElasticPoolListMetricsResult represents the response to a
+// List Azure SQL Recommended Elastic Pool metrics request.
+type RecommendedElasticPoolListMetricsResult struct {
+ autorest.Response `json:"-"`
+ Value *[]RecommendedElasticPoolMetric `json:"value,omitempty"`
+}
+
+// RecommendedElasticPoolListResult represents the response to a List Azure
+// SQL Recommended Elastic Pool request.
+type RecommendedElasticPoolListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]RecommendedElasticPool `json:"value,omitempty"`
+}
+
+// RecommendedElasticPoolMetric represents an Azure SQL recommended elastic
+// pool metric.
+type RecommendedElasticPoolMetric struct {
+ DateTime *date.Time `json:"dateTime,omitempty"`
+ Dtu *float64 `json:"dtu,omitempty"`
+ SizeGB *float64 `json:"sizeGB,omitempty"`
+}
+
+// RecommendedElasticPoolProperties represents the properties of an Azure
+// SQL Recommended Elastic Pool.
+type RecommendedElasticPoolProperties struct {
+ DatabaseEdition ElasticPoolEditions `json:"databaseEdition,omitempty"`
+ Dtu *float64 `json:"dtu,omitempty"`
+ DatabaseDtuMin *float64 `json:"databaseDtuMin,omitempty"`
+ DatabaseDtuMax *float64 `json:"databaseDtuMax,omitempty"`
+ StorageMB *float64 `json:"storageMB,omitempty"`
+ ObservationPeriodStart *date.Time `json:"observationPeriodStart,omitempty"`
+ ObservationPeriodEnd *date.Time `json:"observationPeriodEnd,omitempty"`
+ MaxObservedDtu *float64 `json:"maxObservedDtu,omitempty"`
+ MaxObservedStorageMB *float64 `json:"maxObservedStorageMB,omitempty"`
+ Databases *[]Database `json:"databases,omitempty"`
+ Metrics *[]RecommendedElasticPoolMetric `json:"metrics,omitempty"`
+}
+
+// RecommendedIndex represents an Azure SQL Database recommended index.
+type RecommendedIndex struct {
+ Name *string `json:"name,omitempty"`
+ ID *string `json:"id,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Tags *map[string]*string `json:"tags,omitempty"`
+ *RecommendedIndexProperties `json:"properties,omitempty"`
+}
+
+// RecommendedIndexProperties represents the properties of an Azure SQL
+// Database recommended index.
+type RecommendedIndexProperties struct {
+ Action RecommendedIndexActions `json:"action,omitempty"`
+ State RecommendedIndexStates `json:"state,omitempty"`
+ Created *date.Time `json:"created,omitempty"`
+ LastModified *date.Time `json:"lastModified,omitempty"`
+ IndexType RecommendedIndexTypes `json:"indexType,omitempty"`
+ Schema *string `json:"schema,omitempty"`
+ Table *string `json:"table,omitempty"`
+ Columns *[]string `json:"columns,omitempty"`
+ IncludedColumns *[]string `json:"includedColumns,omitempty"`
+ IndexScript *string `json:"indexScript,omitempty"`
+ EstimatedImpact *[]OperationImpact `json:"estimatedImpact,omitempty"`
+ ReportedImpact *[]OperationImpact `json:"reportedImpact,omitempty"`
+}
+
+// Resource represents resource properties.
+type Resource struct {
+ Name *string `json:"name,omitempty"`
+ ID *string `json:"id,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Tags *map[string]*string `json:"tags,omitempty"`
+}
+
+// RestorePoint represents an Azure SQL Database restore point.
+type RestorePoint struct {
+ Name *string `json:"name,omitempty"`
+ ID *string `json:"id,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Tags *map[string]*string `json:"tags,omitempty"`
+ *RestorePointProperties `json:"properties,omitempty"`
+}
+
+// RestorePointListResult represents the response to a List Azure SQL
+// Database restore points request.
+type RestorePointListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]RestorePoint `json:"value,omitempty"`
+}
+
+// RestorePointProperties represents the properties of an Azure SQL
+// Database restore point.
+type RestorePointProperties struct {
+ RestorePointType RestorePointTypes `json:"restorePointType,omitempty"`
+ RestorePointCreationDate *date.Time `json:"restorePointCreationDate,omitempty"`
+ EarliestRestoreDate *date.Time `json:"earliestRestoreDate,omitempty"`
+}
+
+// Schema represents an Azure SQL Database schema.
+type Schema struct {
+ Name *string `json:"name,omitempty"`
+ ID *string `json:"id,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Tags *map[string]*string `json:"tags,omitempty"`
+ *SchemaProperties `json:"properties,omitempty"`
+}
+
+// SchemaProperties represents the properties of an Azure SQL Database
+// schema.
+type SchemaProperties struct {
+ Tables *[]Table `json:"tables,omitempty"`
+}
+
+// Server represents an Azure SQL server.
+type Server struct {
+ autorest.Response `json:"-"`
+ Name *string `json:"name,omitempty"`
+ ID *string `json:"id,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Tags *map[string]*string `json:"tags,omitempty"`
+ *ServerProperties `json:"properties,omitempty"`
+}
+
+// ServerListResult represents the response to a Get Azure SQL server
+// request.
+type ServerListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]Server `json:"value,omitempty"`
+}
+
+// ServerMetric represents Azure SQL server metrics.
+type ServerMetric struct {
+ ResourceName *string `json:"resourceName,omitempty"`
+ DisplayName *string `json:"displayName,omitempty"`
+ CurrentValue *float64 `json:"currentValue,omitempty"`
+ Limit *float64 `json:"limit,omitempty"`
+ Unit *string `json:"unit,omitempty"`
+ NextResetTime *date.Time `json:"nextResetTime,omitempty"`
+}
+
+// ServerMetricListResult represents the response to a List Azure SQL
+// server metrics request.
+type ServerMetricListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]ServerMetric `json:"value,omitempty"`
+}
+
+// ServerProperties represents the properties of an Azure SQL server.
+type ServerProperties struct {
+ FullyQualifiedDomainName *string `json:"fullyQualifiedDomainName,omitempty"`
+ Version ServerVersion `json:"version,omitempty"`
+ AdministratorLogin *string `json:"administratorLogin,omitempty"`
+ AdministratorLoginPassword *string `json:"administratorLoginPassword,omitempty"`
+}
+
+// ServiceObjective represents an Azure SQL Database Service Objective.
+type ServiceObjective struct {
+ autorest.Response `json:"-"`
+ Name *string `json:"name,omitempty"`
+ ID *string `json:"id,omitempty"`
+ *ServiceObjectiveProperties `json:"properties,omitempty"`
+}
+
+// ServiceObjectiveListResult represents the response to a Get Azure SQL
+// Database Service Objectives request.
+type ServiceObjectiveListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]ServiceObjective `json:"value,omitempty"`
+}
+
+// ServiceObjectiveProperties represents the properties of an Azure SQL
+// Database Service Objective.
+type ServiceObjectiveProperties struct {
+ ServiceObjectiveName *string `json:"serviceObjectiveName,omitempty"`
+ IsDefault *bool `json:"isDefault,omitempty"`
+ IsSystem *bool `json:"isSystem,omitempty"`
+ Description *string `json:"description,omitempty"`
+ Enabled *bool `json:"enabled,omitempty"`
+}
+
+// ServiceTierAdvisor represents a Service Tier Advisor.
+type ServiceTierAdvisor struct {
+ autorest.Response `json:"-"`
+ Name *string `json:"name,omitempty"`
+ ID *string `json:"id,omitempty"`
+ *ServiceTierAdvisorProperties `json:"properties,omitempty"`
+}
+
+// ServiceTierAdvisorListResult represents the response to a list service
+// tier advisor request.
+type ServiceTierAdvisorListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]ServiceTierAdvisor `json:"value,omitempty"`
+}
+
+// ServiceTierAdvisorProperties represents the properties of a Service Tier
+// Advisor.
+type ServiceTierAdvisorProperties struct {
+ ObservationPeriodStart *date.Time `json:"observationPeriodStart,omitempty"`
+ ObservationPeriodEnd *date.Time `json:"observationPeriodEnd,omitempty"`
+ ActiveTimeRatio *float64 `json:"activeTimeRatio,omitempty"`
+ MinDtu *float64 `json:"minDtu,omitempty"`
+ AvgDtu *float64 `json:"avgDtu,omitempty"`
+ MaxDtu *float64 `json:"maxDtu,omitempty"`
+ MaxSizeInGB *float64 `json:"maxSizeInGB,omitempty"`
+ ServiceLevelObjectiveUsageMetrics *[]SloUsageMetric `json:"serviceLevelObjectiveUsageMetrics,omitempty"`
+ CurrentServiceLevelObjective *uuid.UUID `json:"currentServiceLevelObjective,omitempty"`
+ CurrentServiceLevelObjectiveID *uuid.UUID `json:"currentServiceLevelObjectiveId,omitempty"`
+ UsageBasedRecommendationServiceLevelObjective *string `json:"usageBasedRecommendationServiceLevelObjective,omitempty"`
+ UsageBasedRecommendationServiceLevelObjectiveID *uuid.UUID `json:"usageBasedRecommendationServiceLevelObjectiveId,omitempty"`
+ DatabaseSizeBasedRecommendationServiceLevelObjective *string `json:"databaseSizeBasedRecommendationServiceLevelObjective,omitempty"`
+ DatabaseSizeBasedRecommendationServiceLevelObjectiveID *uuid.UUID `json:"databaseSizeBasedRecommendationServiceLevelObjectiveId,omitempty"`
+ DisasterPlanBasedRecommendationServiceLevelObjective *string `json:"disasterPlanBasedRecommendationServiceLevelObjective,omitempty"`
+ DisasterPlanBasedRecommendationServiceLevelObjectiveID *uuid.UUID `json:"disasterPlanBasedRecommendationServiceLevelObjectiveId,omitempty"`
+ OverallRecommendationServiceLevelObjective *string `json:"overallRecommendationServiceLevelObjective,omitempty"`
+ OverallRecommendationServiceLevelObjectiveID *uuid.UUID `json:"overallRecommendationServiceLevelObjectiveId,omitempty"`
+ Confidence *float64 `json:"confidence,omitempty"`
+}
+
+// SloUsageMetric represents a service level objective (SLO) usage metric.
+type SloUsageMetric struct {
+ Name *string `json:"name,omitempty"`
+ ID *string `json:"id,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Tags *map[string]*string `json:"tags,omitempty"`
+ ServiceLevelObjective ServiceObjectiveName `json:"serviceLevelObjective,omitempty"`
+ ServiceLevelObjectiveID *uuid.UUID `json:"serviceLevelObjectiveId,omitempty"`
+ InRangeTimeRatio *float64 `json:"inRangeTimeRatio,omitempty"`
+}
+
+// SubResource represents subresource properties.
+type SubResource struct {
+ Name *string `json:"name,omitempty"`
+ ID *string `json:"id,omitempty"`
+}
+
+// Table represents an Azure SQL Database table.
+type Table struct {
+ Name *string `json:"name,omitempty"`
+ ID *string `json:"id,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Tags *map[string]*string `json:"tags,omitempty"`
+ *TableProperties `json:"properties,omitempty"`
+}
+
+// TableProperties represents the properties of an Azure SQL Database table.
+type TableProperties struct {
+ TableType TableType `json:"tableType,omitempty"`
+ Columns *[]Column `json:"columns,omitempty"`
+ RecommendedIndexes *[]RecommendedIndex `json:"recommendedIndexes,omitempty"`
+}
+
+// TransparentDataEncryption represents an Azure SQL Database Transparent
+// Data Encryption.
+type TransparentDataEncryption struct {
+ autorest.Response `json:"-"`
+ Name *string `json:"name,omitempty"`
+ ID *string `json:"id,omitempty"`
+ *TransparentDataEncryptionProperties `json:"properties,omitempty"`
+}
+
+// TransparentDataEncryptionActivity represents an Azure SQL Database
+// Transparent Data Encryption Scan.
+type TransparentDataEncryptionActivity struct {
+ Name *string `json:"name,omitempty"`
+ ID *string `json:"id,omitempty"`
+ *TransparentDataEncryptionActivityProperties `json:"properties,omitempty"`
+}
+
+// TransparentDataEncryptionActivityListResult represents the response to a
+// List Azure SQL Database Transparent Data Encryption Activity request.
+type TransparentDataEncryptionActivityListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]TransparentDataEncryptionActivity `json:"value,omitempty"`
+}
+
+// TransparentDataEncryptionActivityProperties represents the properties of
+// an Azure SQL Database Transparent Data Encryption Scan.
+type TransparentDataEncryptionActivityProperties struct {
+ Status TransparentDataEncryptionActivityStates `json:"status,omitempty"`
+ PercentComplete *float64 `json:"percentComplete,omitempty"`
+}
+
+// TransparentDataEncryptionProperties represents the properties of an
+// Azure SQL Database Transparent Data Encryption.
+type TransparentDataEncryptionProperties struct {
+ Status TransparentDataEncryptionStates `json:"status,omitempty"`
+}
+
+// UpgradeHint represents an Upgrade Hint.
+type UpgradeHint struct {
+ Name *string `json:"name,omitempty"`
+ ID *string `json:"id,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Tags *map[string]*string `json:"tags,omitempty"`
+ TargetServiceLevelObjective *string `json:"targetServiceLevelObjective,omitempty"`
+ TargetServiceLevelObjectiveID *uuid.UUID `json:"targetServiceLevelObjectiveId,omitempty"`
+}
+
+// UpgradeRecommendedElasticPoolProperties represents the properties of an
+// Azure SQL Recommended Elastic Pool being upgraded.
+type UpgradeRecommendedElasticPoolProperties struct {
+ Name *string `json:"Name,omitempty"`
+ Edition TargetElasticPoolEditions `json:"Edition,omitempty"`
+ Dtu *int32 `json:"Dtu,omitempty"`
+ StorageMb *int32 `json:"StorageMb,omitempty"`
+ DatabaseDtuMin *int32 `json:"DatabaseDtuMin,omitempty"`
+ DatabaseDtuMax *int32 `json:"DatabaseDtuMax,omitempty"`
+ DatabaseCollection *[]string `json:"DatabaseCollection,omitempty"`
+ IncludeAllDatabases *bool `json:"IncludeAllDatabases,omitempty"`
+}
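+
+// NOTE (editorial sketch, not generated code): because nearly every field in
+// these models is a pointer (including slices such as *[]Database), callers
+// should nil-check before dereferencing. A defensive helper, under those
+// assumptions:
+//
+//	func databaseNames(result DatabaseListResult) []string {
+//		var names []string
+//		if result.Value == nil {
+//			return names
+//		}
+//		for _, db := range *result.Value {
+//			if db.Name != nil {
+//				names = append(names, *db.Name)
+//			}
+//		}
+//		return names
+//	}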
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/recommendedelasticpools.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/recommendedelasticpools.go
new file mode 100644
index 000000000..2dbe6611e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/recommendedelasticpools.go
@@ -0,0 +1,380 @@
+package sql
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "net/http"
+)
+
+// RecommendedElasticPoolsClient provides create, read, update and delete
+// functionality for Azure SQL resources including servers, databases,
+// elastic pools, recommendations, operations, and usage metrics.
+type RecommendedElasticPoolsClient struct {
+ ManagementClient
+}
+
+// NewRecommendedElasticPoolsClient creates an instance of the
+// RecommendedElasticPoolsClient client.
+func NewRecommendedElasticPoolsClient(subscriptionID string) RecommendedElasticPoolsClient {
+ return NewRecommendedElasticPoolsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewRecommendedElasticPoolsClientWithBaseURI creates an instance of the
+// RecommendedElasticPoolsClient client.
+func NewRecommendedElasticPoolsClientWithBaseURI(baseURI string, subscriptionID string) RecommendedElasticPoolsClient {
+ return RecommendedElasticPoolsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Get gets information about an Azure SQL Recommended Elastic Pool.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server.
+// recommendedElasticPoolName is the name of the Azure SQL Recommended
+// Elastic Pool to be retrieved.
+func (client RecommendedElasticPoolsClient) Get(resourceGroupName string, serverName string, recommendedElasticPoolName string) (result RecommendedElasticPool, err error) {
+ req, err := client.GetPreparer(resourceGroupName, serverName, recommendedElasticPoolName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.RecommendedElasticPoolsClient", "Get", nil, "Failure preparing request")
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.RecommendedElasticPoolsClient", "Get", resp, "Failure sending request")
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.RecommendedElasticPoolsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client RecommendedElasticPoolsClient) GetPreparer(resourceGroupName string, serverName string, recommendedElasticPoolName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "recommendedElasticPoolName": autorest.Encode("path", recommendedElasticPoolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/recommendedElasticPools/{recommendedElasticPoolName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client RecommendedElasticPoolsClient) GetSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client RecommendedElasticPoolsClient) GetResponder(resp *http.Response) (result RecommendedElasticPool, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetDatabases gets information about an Azure SQL database inside an
+// Azure SQL Recommended Elastic Pool.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server.
+// recommendedElasticPoolName is the name of the Azure SQL Elastic Pool to be
+// retrieved. databaseName is the name of the Azure SQL database to be
+// retrieved.
+func (client RecommendedElasticPoolsClient) GetDatabases(resourceGroupName string, serverName string, recommendedElasticPoolName string, databaseName string) (result Database, err error) {
+ req, err := client.GetDatabasesPreparer(resourceGroupName, serverName, recommendedElasticPoolName, databaseName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.RecommendedElasticPoolsClient", "GetDatabases", nil, "Failure preparing request")
+ }
+
+ resp, err := client.GetDatabasesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.RecommendedElasticPoolsClient", "GetDatabases", resp, "Failure sending request")
+ }
+
+ result, err = client.GetDatabasesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.RecommendedElasticPoolsClient", "GetDatabases", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetDatabasesPreparer prepares the GetDatabases request.
+func (client RecommendedElasticPoolsClient) GetDatabasesPreparer(resourceGroupName string, serverName string, recommendedElasticPoolName string, databaseName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "databaseName": autorest.Encode("path", databaseName),
+ "recommendedElasticPoolName": autorest.Encode("path", recommendedElasticPoolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/recommendedElasticPools/{recommendedElasticPoolName}/databases/{databaseName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// GetDatabasesSender sends the GetDatabases request. The method will close the
+// http.Response Body if it receives an error.
+func (client RecommendedElasticPoolsClient) GetDatabasesSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// GetDatabasesResponder handles the response to the GetDatabases request. The method always
+// closes the http.Response Body.
+func (client RecommendedElasticPoolsClient) GetDatabasesResponder(resp *http.Response) (result Database, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List returns information about Azure SQL Recommended Elastic Pools.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server.
+func (client RecommendedElasticPoolsClient) List(resourceGroupName string, serverName string) (result RecommendedElasticPoolListResult, err error) {
+ req, err := client.ListPreparer(resourceGroupName, serverName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.RecommendedElasticPoolsClient", "List", nil, "Failure preparing request")
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.RecommendedElasticPoolsClient", "List", resp, "Failure sending request")
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.RecommendedElasticPoolsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client RecommendedElasticPoolsClient) ListPreparer(resourceGroupName string, serverName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/recommendedElasticPools", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client RecommendedElasticPoolsClient) ListSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client RecommendedElasticPoolsClient) ListResponder(resp *http.Response) (result RecommendedElasticPoolListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListDatabases returns information about the Azure SQL databases inside an
+// Azure SQL Recommended Elastic Pool.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server.
+// recommendedElasticPoolName is the name of the Azure SQL Recommended
+// Elastic Pool to be retrieved.
+func (client RecommendedElasticPoolsClient) ListDatabases(resourceGroupName string, serverName string, recommendedElasticPoolName string) (result DatabaseListResult, err error) {
+ req, err := client.ListDatabasesPreparer(resourceGroupName, serverName, recommendedElasticPoolName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.RecommendedElasticPoolsClient", "ListDatabases", nil, "Failure preparing request")
+ }
+
+ resp, err := client.ListDatabasesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.RecommendedElasticPoolsClient", "ListDatabases", resp, "Failure sending request")
+ }
+
+ result, err = client.ListDatabasesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.RecommendedElasticPoolsClient", "ListDatabases", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListDatabasesPreparer prepares the ListDatabases request.
+func (client RecommendedElasticPoolsClient) ListDatabasesPreparer(resourceGroupName string, serverName string, recommendedElasticPoolName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "recommendedElasticPoolName": autorest.Encode("path", recommendedElasticPoolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/recommendedElasticPools/{recommendedElasticPoolName}/databases", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListDatabasesSender sends the ListDatabases request. The method will close the
+// http.Response Body if it receives an error.
+func (client RecommendedElasticPoolsClient) ListDatabasesSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListDatabasesResponder handles the response to the ListDatabases request. The method always
+// closes the http.Response Body.
+func (client RecommendedElasticPoolsClient) ListDatabasesResponder(resp *http.Response) (result DatabaseListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListMetrics returns metrics for an Azure SQL Recommended Elastic Pool.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server.
+// recommendedElasticPoolName is the name of the Azure SQL Recommended
+// Elastic Pool to be retrieved.
+func (client RecommendedElasticPoolsClient) ListMetrics(resourceGroupName string, serverName string, recommendedElasticPoolName string) (result RecommendedElasticPoolListMetricsResult, err error) {
+ req, err := client.ListMetricsPreparer(resourceGroupName, serverName, recommendedElasticPoolName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.RecommendedElasticPoolsClient", "ListMetrics", nil, "Failure preparing request")
+ }
+
+ resp, err := client.ListMetricsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.RecommendedElasticPoolsClient", "ListMetrics", resp, "Failure sending request")
+ }
+
+ result, err = client.ListMetricsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.RecommendedElasticPoolsClient", "ListMetrics", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListMetricsPreparer prepares the ListMetrics request.
+func (client RecommendedElasticPoolsClient) ListMetricsPreparer(resourceGroupName string, serverName string, recommendedElasticPoolName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "recommendedElasticPoolName": autorest.Encode("path", recommendedElasticPoolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/recommendedElasticPools/{recommendedElasticPoolName}/metrics", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListMetricsSender sends the ListMetrics request. The method will close the
+// http.Response Body if it receives an error.
+func (client RecommendedElasticPoolsClient) ListMetricsSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListMetricsResponder handles the response to the ListMetrics request. The method always
+// closes the http.Response Body.
+func (client RecommendedElasticPoolsClient) ListMetricsResponder(resp *http.Response) (result RecommendedElasticPoolListMetricsResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
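+
+// Example (editorial sketch, not generated code): chaining List and
+// ListMetrics to inspect every recommended pool on a server. The resource
+// names are placeholders:
+//
+//	pools, err := client.List("my-rg", "my-server")
+//	if err != nil {
+//		return err
+//	}
+//	if pools.Value != nil {
+//		for _, p := range *pools.Value {
+//			if p.Name == nil {
+//				continue
+//			}
+//			metrics, err := client.ListMetrics("my-rg", "my-server", *p.Name)
+//			if err != nil {
+//				return err
+//			}
+//			_ = metrics // inspect metrics.Value here
+//		}
+//	}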
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/servers.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/servers.go
new file mode 100644
index 000000000..11e4a35e4
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/servers.go
@@ -0,0 +1,553 @@
+package sql
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "net/http"
+)
+
+// ServersClient provides create, read, update, and delete functionality
+// for Azure SQL resources including servers, databases, elastic pools,
+// recommendations, operations, and usage metrics.
+type ServersClient struct {
+ ManagementClient
+}
+
+// NewServersClient creates an instance of the ServersClient client.
+func NewServersClient(subscriptionID string) ServersClient {
+ return NewServersClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewServersClientWithBaseURI creates an instance of the ServersClient client.
+func NewServersClientWithBaseURI(baseURI string, subscriptionID string) ServersClient {
+ return ServersClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates a new Azure SQL server.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server. parameters is
+// the required parameters for creating or updating a server.
+func (client ServersClient) CreateOrUpdate(resourceGroupName string, serverName string, parameters Server) (result Server, err error) {
+ req, err := client.CreateOrUpdatePreparer(resourceGroupName, serverName, parameters)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.ServersClient", "CreateOrUpdate", nil, "Failure preparing request")
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.ServersClient", "CreateOrUpdate", resp, "Failure sending request")
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ServersClient", "CreateOrUpdate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client ServersClient) CreateOrUpdatePreparer(resourceGroupName string, serverName string, parameters Server) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsJSON(),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client ServersClient) CreateOrUpdateResponder(resp *http.Response) (result Server, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes a SQL server.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server.
+func (client ServersClient) Delete(resourceGroupName string, serverName string) (result autorest.Response, err error) {
+ req, err := client.DeletePreparer(resourceGroupName, serverName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.ServersClient", "Delete", nil, "Failure preparing request")
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ return result, autorest.NewErrorWithError(err, "sql.ServersClient", "Delete", resp, "Failure sending request")
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ServersClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ServersClient) DeletePreparer(resourceGroupName string, serverName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServersClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ServersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// GetByResourceGroup gets information about an Azure SQL server.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server.
+func (client ServersClient) GetByResourceGroup(resourceGroupName string, serverName string) (result Server, err error) {
+ req, err := client.GetByResourceGroupPreparer(resourceGroupName, serverName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.ServersClient", "GetByResourceGroup", nil, "Failure preparing request")
+ }
+
+ resp, err := client.GetByResourceGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.ServersClient", "GetByResourceGroup", resp, "Failure sending request")
+ }
+
+ result, err = client.GetByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ServersClient", "GetByResourceGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetByResourceGroupPreparer prepares the GetByResourceGroup request.
+func (client ServersClient) GetByResourceGroupPreparer(resourceGroupName string, serverName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// GetByResourceGroupSender sends the GetByResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServersClient) GetByResourceGroupSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// GetByResourceGroupResponder handles the response to the GetByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client ServersClient) GetByResourceGroupResponder(resp *http.Response) (result Server, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetServiceObjective gets information about an Azure SQL database Service
+// Objective.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server.
+// serviceObjectiveName is the name of the service objective to retrieve.
+func (client ServersClient) GetServiceObjective(resourceGroupName string, serverName string, serviceObjectiveName string) (result ServiceObjective, err error) {
+ req, err := client.GetServiceObjectivePreparer(resourceGroupName, serverName, serviceObjectiveName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.ServersClient", "GetServiceObjective", nil, "Failure preparing request")
+ }
+
+ resp, err := client.GetServiceObjectiveSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.ServersClient", "GetServiceObjective", resp, "Failure sending request")
+ }
+
+ result, err = client.GetServiceObjectiveResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ServersClient", "GetServiceObjective", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetServiceObjectivePreparer prepares the GetServiceObjective request.
+func (client ServersClient) GetServiceObjectivePreparer(resourceGroupName string, serverName string, serviceObjectiveName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "serviceObjectiveName": autorest.Encode("path", serviceObjectiveName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/serviceObjectives/{serviceObjectiveName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// GetServiceObjectiveSender sends the GetServiceObjective request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServersClient) GetServiceObjectiveSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// GetServiceObjectiveResponder handles the response to the GetServiceObjective request. The method always
+// closes the http.Response Body.
+func (client ServersClient) GetServiceObjectiveResponder(resp *http.Response) (result ServiceObjective, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List returns information about the Azure SQL servers in the subscription.
+func (client ServersClient) List() (result ServerListResult, err error) {
+ req, err := client.ListPreparer()
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.ServersClient", "List", nil, "Failure preparing request")
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.ServersClient", "List", resp, "Failure sending request")
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ServersClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client ServersClient) ListPreparer() (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Sql/servers", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServersClient) ListSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ServersClient) ListResponder(resp *http.Response) (result ServerListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByResourceGroup returns information about the Azure SQL servers in
+// a resource group.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal.
+func (client ServersClient) ListByResourceGroup(resourceGroupName string) (result ServerListResult, err error) {
+ req, err := client.ListByResourceGroupPreparer(resourceGroupName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.ServersClient", "ListByResourceGroup", nil, "Failure preparing request")
+ }
+
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.ServersClient", "ListByResourceGroup", resp, "Failure sending request")
+ }
+
+ result, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ServersClient", "ListByResourceGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
+func (client ServersClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServersClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client ServersClient) ListByResourceGroupResponder(resp *http.Response) (result ServerListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListServiceObjectives returns information about Azure SQL database Service
+// Objectives.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server.
+func (client ServersClient) ListServiceObjectives(resourceGroupName string, serverName string) (result ServiceObjectiveListResult, err error) {
+ req, err := client.ListServiceObjectivesPreparer(resourceGroupName, serverName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.ServersClient", "ListServiceObjectives", nil, "Failure preparing request")
+ }
+
+ resp, err := client.ListServiceObjectivesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.ServersClient", "ListServiceObjectives", resp, "Failure sending request")
+ }
+
+ result, err = client.ListServiceObjectivesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ServersClient", "ListServiceObjectives", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListServiceObjectivesPreparer prepares the ListServiceObjectives request.
+func (client ServersClient) ListServiceObjectivesPreparer(resourceGroupName string, serverName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/serviceObjectives", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListServiceObjectivesSender sends the ListServiceObjectives request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServersClient) ListServiceObjectivesSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListServiceObjectivesResponder handles the response to the ListServiceObjectives request. The method always
+// closes the http.Response Body.
+func (client ServersClient) ListServiceObjectivesResponder(resp *http.Response) (result ServiceObjectiveListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListUsages returns information about Azure SQL server usage.
+//
+// resourceGroupName is the name of the resource group that contains the
+// resource. You can obtain this value from the Azure Resource Manager API or
+// the portal. serverName is the name of the Azure SQL server.
+func (client ServersClient) ListUsages(resourceGroupName string, serverName string) (result ServerMetricListResult, err error) {
+ req, err := client.ListUsagesPreparer(resourceGroupName, serverName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.ServersClient", "ListUsages", nil, "Failure preparing request")
+ }
+
+ resp, err := client.ListUsagesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.ServersClient", "ListUsages", resp, "Failure sending request")
+ }
+
+ result, err = client.ListUsagesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ServersClient", "ListUsages", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListUsagesPreparer prepares the ListUsages request.
+func (client ServersClient) ListUsagesPreparer(resourceGroupName string, serverName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": client.APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/usages", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListUsagesSender sends the ListUsages request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServersClient) ListUsagesSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListUsagesResponder handles the response to the ListUsages request. The method always
+// closes the http.Response Body.
+func (client ServersClient) ListUsagesResponder(resp *http.Response) (result ServerMetricListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
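Every ServersClient operation above follows the same three-stage shape: the Preparer builds the *http.Request from path and query parameters, the Sender executes it, and the Responder checks the status code and unmarshals the body, so error handling is uniform across methods. A short sketch of listing the servers in a resource group, again with placeholder names and with credential setup omitted:

    package main

    import (
        "fmt"
        "log"

        "github.com/Azure/azure-sdk-for-go/arm/sql"
    )

    func main() {
        client := sql.NewServersClient("00000000-0000-0000-0000-000000000000")
        // As above, client.Authorizer must carry working credentials before
        // the call below can succeed.

        result, err := client.ListByResourceGroup("myResourceGroup")
        if err != nil {
            log.Fatalf("ListByResourceGroup: %v", err)
        }
        // ServerListResult keeps the raw response that the Responder stored.
        fmt.Println("HTTP status:", result.Response.StatusCode)
    }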
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/version.go
new file mode 100644
index 000000000..2fabb4531
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/sql/version.go
@@ -0,0 +1,60 @@
+package sql
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+)
+
+const (
+ major = "8"
+ minor = "1"
+ patch = "0"
+ tag = "beta"
+ userAgentFormat = "Azure-SDK-For-Go/%s arm-%s/%s"
+)
+
+// cached results of UserAgent and Version to prevent repeated operations.
+var (
+ userAgent string
+ version string
+)
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+ if userAgent == "" {
+ userAgent = fmt.Sprintf(userAgentFormat, Version(), "sql", "2014-04-01")
+ }
+ return userAgent
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+ if version == "" {
+ versionBuilder := bytes.NewBufferString(fmt.Sprintf("%s.%s.%s", major, minor, patch))
+ if tag != "" {
+ versionBuilder.WriteRune('-')
+ versionBuilder.WriteString(strings.TrimPrefix(tag, "-"))
+ }
+ version = string(versionBuilder.Bytes())
+ }
+ return version
+}
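With the constants as committed (major "8", minor "1", patch "0", tag "beta"), Version and UserAgent always evaluate to the same two strings, computed once and then cached in the package-level variables. A quick check of the expected output:

    package main

    import (
        "fmt"

        "github.com/Azure/azure-sdk-for-go/arm/sql"
    )

    func main() {
        fmt.Println(sql.Version())   // 8.1.0-beta
        fmt.Println(sql.UserAgent()) // Azure-SDK-For-Go/8.1.0-beta arm-sql/2014-04-01
    }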
diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go
index 0477c7471..606191fbb 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go
@@ -849,7 +849,7 @@ func (c *RDS) CopyDBSnapshotRequest(input *CopyDBSnapshotInput) (req *request.Re
// To copy a DB snapshot from a shared manual DB snapshot, SourceDBSnapshotIdentifier
// must be the Amazon Resource Name (ARN) of the shared DB snapshot.
//
-// You can copy an encrypted DB snapshot from another AWS Region. In that case,
+// You can copy an encrypted DB snapshot from another AWS region. In that case,
// the region where you call the CopyDBSnapshot action is the destination region
// for the encrypted DB snapshot to be copied to. To copy an encrypted DB snapshot
// from another region, you must provide the following values:
@@ -900,7 +900,7 @@ func (c *RDS) CopyDBSnapshotRequest(input *CopyDBSnapshotInput) (req *request.Re
// in the presigned URL.
//
// For more information on copying encrypted snapshots from one region to another,
-// see Copying an Encrypted DB Snapshot to Another Region (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_CopySnapshot.html#USER_CopySnapshot.Encrypted.CrossRegion)
+// see Copying a DB Snapshot (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_CopySnapshot.html#USER_CopyDBSnapshot)
// in the Amazon RDS User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -1565,6 +1565,9 @@ func (c *RDS) CreateDBInstanceReadReplicaRequest(input *CreateDBInstanceReadRepl
// Creates a DB instance for a DB instance running MySQL, MariaDB, or PostgreSQL
// that acts as a Read Replica of a source DB instance.
//
+// Amazon Aurora does not support this action. You must call the CreateDBInstance
+// action to create a DB instance for an Aurora DB cluster.
+//
// All Read Replica DB instances are created as Single-AZ deployments with backups
// disabled. All other DB instance attributes (including DB security groups
// and DB parameter groups) are inherited from the source DB instance, except
@@ -2453,7 +2456,9 @@ func (c *RDS) DeleteDBClusterParameterGroupRequest(input *DeleteDBClusterParamet
//
// Returned Error Codes:
// * ErrCodeInvalidDBParameterGroupStateFault "InvalidDBParameterGroupState"
-// The DB parameter group cannot be deleted because it is in use.
+// The DB parameter group is in use or is in an invalid state. If you are attempting
+// to delete the parameter group, you cannot delete it when the parameter group
+// is in this state.
//
// * ErrCodeDBParameterGroupNotFoundFault "DBParameterGroupNotFound"
// DBParameterGroupName does not refer to an existing DB parameter group.
@@ -2744,7 +2749,9 @@ func (c *RDS) DeleteDBParameterGroupRequest(input *DeleteDBParameterGroupInput)
//
// Returned Error Codes:
// * ErrCodeInvalidDBParameterGroupStateFault "InvalidDBParameterGroupState"
-// The DB parameter group cannot be deleted because it is in use.
+// The DB parameter group is in use or is in an invalid state. If you are attempting
+// to delete the parameter group, you cannot delete it when the parameter group
+// is in this state.
//
// * ErrCodeDBParameterGroupNotFoundFault "DBParameterGroupNotFound"
// DBParameterGroupName does not refer to an existing DB parameter group.
@@ -6889,7 +6896,9 @@ func (c *RDS) ModifyDBClusterParameterGroupRequest(input *ModifyDBClusterParamet
// DBParameterGroupName does not refer to an existing DB parameter group.
//
// * ErrCodeInvalidDBParameterGroupStateFault "InvalidDBParameterGroupState"
-// The DB parameter group cannot be deleted because it is in use.
+// The DB parameter group is in use or is in an invalid state. If you are attempting
+// to delete the parameter group, you cannot delete it when the parameter group
+// is in this state.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/ModifyDBClusterParameterGroup
func (c *RDS) ModifyDBClusterParameterGroup(input *ModifyDBClusterParameterGroupInput) (*DBClusterParameterGroupNameMessage, error) {
@@ -6967,14 +6976,15 @@ func (c *RDS) ModifyDBClusterSnapshotAttributeRequest(input *ModifyDBClusterSnap
// snapshot. Use the value all to make the manual DB cluster snapshot public,
// which means that it can be copied or restored by all AWS accounts. Do not
// add the all value for any manual DB cluster snapshots that contain private
-// information that you don't want available to all AWS accounts.
+// information that you don't want available to all AWS accounts. If a manual
+// DB cluster snapshot is encrypted, it can be shared, but only by specifying
+// a list of authorized AWS account IDs for the ValuesToAdd parameter. You can't
+// use all as a value for that parameter in this case.
//
// To view which AWS accounts have access to copy or restore a manual DB cluster
// snapshot, or whether a manual DB cluster snapshot public or private, use
// the DescribeDBClusterSnapshotAttributes API action.
//
-// If a manual DB cluster snapshot is encrypted, it cannot be shared.
-//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -7225,7 +7235,9 @@ func (c *RDS) ModifyDBParameterGroupRequest(input *ModifyDBParameterGroupInput)
// DBParameterGroupName does not refer to an existing DB parameter group.
//
// * ErrCodeInvalidDBParameterGroupStateFault "InvalidDBParameterGroupState"
-// The DB parameter group cannot be deleted because it is in use.
+// The DB parameter group is in use or is in an invalid state. If you are attempting
+// to delete the parameter group, you cannot delete it when the parameter group
+// is in this state.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/ModifyDBParameterGroup
func (c *RDS) ModifyDBParameterGroup(input *ModifyDBParameterGroupInput) (*DBParameterGroupNameMessage, error) {
@@ -7388,14 +7400,15 @@ func (c *RDS) ModifyDBSnapshotAttributeRequest(input *ModifyDBSnapshotAttributeI
// Uses the value all to make the manual DB snapshot public, which means it
// can be copied or restored by all AWS accounts. Do not add the all value for
// any manual DB snapshots that contain private information that you don't want
-// available to all AWS accounts.
+// available to all AWS accounts. If the manual DB snapshot is encrypted, it
+// can be shared, but only by specifying a list of authorized AWS account IDs
+// for the ValuesToAdd parameter. You can't use all as a value for that parameter
+// in this case.
//
// To view which AWS accounts have access to copy or restore a manual DB snapshot,
// or whether a manual DB snapshot public or private, use the DescribeDBSnapshotAttributes
// API action.
//
-// If the manual DB snapshot is encrypted, it cannot be shared.
-//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -8407,7 +8420,9 @@ func (c *RDS) ResetDBClusterParameterGroupRequest(input *ResetDBClusterParameter
//
// Returned Error Codes:
// * ErrCodeInvalidDBParameterGroupStateFault "InvalidDBParameterGroupState"
-// The DB parameter group cannot be deleted because it is in use.
+// The DB parameter group is in use or is in an invalid state. If you are attempting
+// to delete the parameter group, you cannot delete it when the parameter group
+// is in this state.
//
// * ErrCodeDBParameterGroupNotFoundFault "DBParameterGroupNotFound"
// DBParameterGroupName does not refer to an existing DB parameter group.
@@ -8480,7 +8495,7 @@ func (c *RDS) ResetDBParameterGroupRequest(input *ResetDBParameterGroupInput) (r
// ResetDBParameterGroup API operation for Amazon Relational Database Service.
//
// Modifies the parameters of a DB parameter group to the engine/system default
-// value. To reset specific parameters submit a list of the following: ParameterName
+// value. To reset specific parameters, provide a list of the following: ParameterName
// and ApplyMethod. To reset the entire DB parameter group, specify the DBParameterGroup
// name and ResetAllParameters parameters. When resetting the entire group,
// dynamic parameters are updated immediately and static parameters are set
@@ -8496,7 +8511,9 @@ func (c *RDS) ResetDBParameterGroupRequest(input *ResetDBParameterGroupInput) (r
//
// Returned Error Codes:
// * ErrCodeInvalidDBParameterGroupStateFault "InvalidDBParameterGroupState"
-// The DB parameter group cannot be deleted because it is in use.
+// The DB parameter group is in use or is in an invalid state. If you are attempting
+// to delete the parameter group, you cannot delete it when the parameter group
+// is in this state.
//
// * ErrCodeDBParameterGroupNotFoundFault "DBParameterGroupNotFound"
// DBParameterGroupName does not refer to an existing DB parameter group.
@@ -8571,7 +8588,7 @@ func (c *RDS) RestoreDBClusterFromS3Request(input *RestoreDBClusterFromS3Input)
// Creates an Amazon Aurora DB cluster from data stored in an Amazon S3 bucket.
// Amazon RDS must be authorized to access the Amazon S3 bucket and the data
// must be created using the Percona XtraBackup utility as described in Migrating
-// Data from MySQL by Using an Amazon S3 Bucket (AmazonRDS/latest/UserGuide/Aurora.Migrate.MySQL.html#Aurora.Migrate.MySQL.S3).
+// Data from MySQL by Using an Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Aurora.Migrate.MySQL.html#Aurora.Migrate.MySQL.S3).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -8857,23 +8874,19 @@ func (c *RDS) RestoreDBClusterToPointInTimeRequest(input *RestoreDBClusterToPoin
// * ErrCodeDBClusterAlreadyExistsFault "DBClusterAlreadyExistsFault"
// User already has a DB cluster with the given identifier.
//
+// * ErrCodeDBClusterNotFoundFault "DBClusterNotFoundFault"
+// DBClusterIdentifier does not refer to an existing DB cluster.
+//
// * ErrCodeDBClusterQuotaExceededFault "DBClusterQuotaExceededFault"
// User attempted to create a new DB cluster and the user has already reached
// the maximum allowed DB cluster quota.
//
-// * ErrCodeStorageQuotaExceededFault "StorageQuotaExceeded"
-// Request would result in user exceeding the allowed amount of storage available
-// across all DB instances.
+// * ErrCodeDBClusterSnapshotNotFoundFault "DBClusterSnapshotNotFoundFault"
+// DBClusterSnapshotIdentifier does not refer to an existing DB cluster snapshot.
//
// * ErrCodeDBSubnetGroupNotFoundFault "DBSubnetGroupNotFoundFault"
// DBSubnetGroupName does not refer to an existing DB subnet group.
//
-// * ErrCodeDBClusterNotFoundFault "DBClusterNotFoundFault"
-// DBClusterIdentifier does not refer to an existing DB cluster.
-//
-// * ErrCodeDBClusterSnapshotNotFoundFault "DBClusterSnapshotNotFoundFault"
-// DBClusterSnapshotIdentifier does not refer to an existing DB cluster snapshot.
-//
// * ErrCodeInsufficientDBClusterCapacityFault "InsufficientDBClusterCapacityFault"
// The DB cluster does not have enough capacity for the current operation.
//
@@ -8882,36 +8895,36 @@ func (c *RDS) RestoreDBClusterToPointInTimeRequest(input *RestoreDBClusterToPoin
// able to resolve this error by updating your subnet group to use different
// Availability Zones that have more storage available.
//
-// * ErrCodeInvalidDBSnapshotStateFault "InvalidDBSnapshotState"
-// The state of the DB snapshot does not allow deletion.
-//
// * ErrCodeInvalidDBClusterSnapshotStateFault "InvalidDBClusterSnapshotStateFault"
// The supplied value is not a valid DB cluster snapshot state.
//
-// * ErrCodeStorageQuotaExceededFault "StorageQuotaExceeded"
-// Request would result in user exceeding the allowed amount of storage available
-// across all DB instances.
+// * ErrCodeInvalidDBClusterStateFault "InvalidDBClusterStateFault"
+// The DB cluster is not in a valid state.
//
-// * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault"
-// DB subnet group does not cover all Availability Zones after it is created
-// because users' change.
+// * ErrCodeInvalidDBSnapshotStateFault "InvalidDBSnapshotState"
+// The state of the DB snapshot does not allow deletion.
//
// * ErrCodeInvalidRestoreFault "InvalidRestoreFault"
// Cannot restore from vpc backup to non-vpc DB instance.
//
-// * ErrCodeDBSubnetGroupNotFoundFault "DBSubnetGroupNotFoundFault"
-// DBSubnetGroupName does not refer to an existing DB subnet group.
-//
// * ErrCodeInvalidSubnet "InvalidSubnet"
// The requested subnet is invalid, or multiple subnets were requested that
// are not all in a common VPC.
//
-// * ErrCodeOptionGroupNotFoundFault "OptionGroupNotFoundFault"
-// The specified option group could not be found.
+// * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault"
+// DB subnet group does not cover all Availability Zones after it is created
+// because of changes made by the user.
//
// * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault"
// Error accessing KMS key.
//
+// * ErrCodeOptionGroupNotFoundFault "OptionGroupNotFoundFault"
+// The specified option group could not be found.
+//
+// * ErrCodeStorageQuotaExceededFault "StorageQuotaExceeded"
+// Request would result in user exceeding the allowed amount of storage available
+// across all DB instances.
+//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/RestoreDBClusterToPointInTime
func (c *RDS) RestoreDBClusterToPointInTime(input *RestoreDBClusterToPointInTimeInput) (*RestoreDBClusterToPointInTimeOutput, error) {
req, out := c.RestoreDBClusterToPointInTimeRequest(input)
@@ -10108,6 +10121,8 @@ func (s *CopyDBClusterParameterGroupOutput) SetDBClusterParameterGroup(v *DBClus
type CopyDBClusterSnapshotInput struct {
_ struct{} `type:"structure"`
+ // True to copy all tags from the source DB cluster snapshot to the target DB
+ // cluster snapshot; otherwise false. The default is false.
CopyTags *bool `type:"boolean"`
// DestinationRegion is used for presigning the request to a given region.
@@ -10170,6 +10185,9 @@ type CopyDBClusterSnapshotInput struct {
// The identifier of the DB cluster snapshot to copy. This parameter is not
// case-sensitive.
//
+ // You cannot copy an encrypted, shared DB cluster snapshot from one AWS region
+ // to another.
+ //
// Constraints:
//
// * Must contain from 1 to 63 alphanumeric characters or hyphens.
@@ -10178,6 +10196,15 @@ type CopyDBClusterSnapshotInput struct {
//
// * Cannot end with a hyphen or contain two consecutive hyphens.
//
+ // * Must specify a valid system snapshot in the "available" state.
+ //
+ // * If the source snapshot is in the same region as the copy, specify a
+ // valid DB snapshot identifier.
+ //
+ // * If the source snapshot is in a different region than the copy, specify
+ // a valid DB cluster snapshot ARN. For more information, go to Copying
+ // a DB Snapshot or DB Cluster Snapshot (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_CopySnapshot.html).
+ //
// Example: my-cluster-snapshot1
//
// SourceDBClusterSnapshotIdentifier is a required field
@@ -10504,7 +10531,7 @@ type CopyDBSnapshotInput struct {
// snapshot to be copied. This identifier must be in the Amazon Resource
// Name (ARN) format for the source region. For example, if you are copying
// an encrypted DB snapshot from the us-west-2 region, then your SourceDBSnapshotIdentifier
- // would look like Example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20161115.
+ // looks like the following example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20161115.
//
// To learn how to generate a Signature Version 4 signed request, see Authenticating
// Requests: Using Query Parameters (AWS Signature Version 4) (http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html)
@@ -10527,7 +10554,7 @@ type CopyDBSnapshotInput struct {
//
// * If the source snapshot is in a different region than the copy, specify
// a valid DB snapshot ARN. For more information, go to Copying a DB Snapshot
- // (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_CopySnapshot.html).
+ // or DB Cluster Snapshot (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_CopySnapshot.html).
//
// Example: rds:mydb-2012-04-02-00-01
//
@@ -10855,6 +10882,12 @@ type CreateDBClusterInput struct {
// DestinationRegion is used for presigning the request to a given region.
DestinationRegion *string `type:"string"`
+ // A Boolean value that is true to enable mapping of AWS Identity and Access
+ // Management (IAM) accounts to database accounts, and otherwise false.
+ //
+ // Default: false
+ EnableIAMDatabaseAuthentication *bool `type:"boolean"`
+
// The name of the database engine to be used for this DB cluster.
//
// Valid Values: aurora
@@ -11072,6 +11105,12 @@ func (s *CreateDBClusterInput) SetDestinationRegion(v string) *CreateDBClusterIn
return s
}
+// SetEnableIAMDatabaseAuthentication sets the EnableIAMDatabaseAuthentication field's value.
+func (s *CreateDBClusterInput) SetEnableIAMDatabaseAuthentication(v bool) *CreateDBClusterInput {
+ s.EnableIAMDatabaseAuthentication = &v
+ return s
+}
+
// SetEngine sets the Engine field's value.
func (s *CreateDBClusterInput) SetEngine(v string) *CreateDBClusterInput {
s.Engine = &v
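The new EnableIAMDatabaseAuthentication field follows the SDK's fluent-setter convention: each generated setter stores the value through a pointer and returns its receiver, so request inputs can be built by chaining. A small sketch using only setters that appear in this diff (the engine value is just an illustrative choice from the documented valid values):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/service/rds"
    )

    func main() {
        // Each setter returns *CreateDBClusterInput, so calls chain.
        input := (&rds.CreateDBClusterInput{}).
            SetEnableIAMDatabaseAuthentication(true).
            SetEngine("aurora")

        // The setters store pointers, hence the dereferences here.
        fmt.Println(*input.Engine, *input.EnableIAMDatabaseAuthentication)
    }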
@@ -11442,6 +11481,12 @@ type CreateDBInstanceInput struct {
//
// Type: Integer
//
+ // Amazon Aurora
+ //
+ // Not applicable. Aurora cluster volumes automatically grow as the amount of
+ // data in your database increases, though you are only charged for the space
+ // that you use in an Aurora cluster volume.
+ //
// MySQL
//
// Constraints: Must be an integer from 5 to 6144.
@@ -11639,6 +11684,18 @@ type CreateDBInstanceInput struct {
// Directory Service.
DomainIAMRoleName *string `type:"string"`
+ // True to enable mapping of AWS Identity and Access Management (IAM) accounts
+ // to database accounts; otherwise false.
+ //
+ // You can enable IAM database authentication for the following database engines:
+ //
+ // * For MySQL 5.6, minor version 5.6.34 or higher
+ //
+ // * For MySQL 5.7, minor version 5.7.16 or higher
+ //
+ // Default: false
+ EnableIAMDatabaseAuthentication *bool `type:"boolean"`
+
// The name of the database engine to be used for this instance.
//
// Valid Values: mysql | mariadb | oracle-se1 | oracle-se2 | oracle-se | oracle-ee
@@ -11737,109 +11794,13 @@ type CreateDBInstanceInput struct {
// ap-southeast-2, eu-west-1, sa-east-1, us-east-1, us-gov-west-1, us-west-1,
// us-west-2): 5.1.73a | 5.1.73b
//
- // Oracle Database Enterprise Edition (oracle-ee)
- //
- // * Version 12.1 (available in all AWS regions except ap-south-1, ap-northeast-2):
- // 12.1.0.1.v1 | 12.1.0.1.v2
- //
- // * Version 12.1 (only available in AWS regions ap-northeast-1, ap-southeast-1,
- // ap-southeast-2, eu-central-1, eu-west-1, sa-east-1, us-east-1, us-west-1,
- // us-west-2): 12.1.0.1.v3 | 12.1.0.1.v4 | 12.1.0.1.v5
- //
- // * Version 12.1 (available in all AWS regions): 12.1.0.2.v1
- //
- // * Version 12.1 (available in all AWS regions except us-gov-west-1): 12.1.0.2.v2
- // | 12.1.0.2.v3 | 12.1.0.2.v4
- //
- // * Version 11.2 (only available in AWS regions ap-northeast-1, ap-southeast-1,
- // ap-southeast-2, eu-west-1, sa-east-1, us-east-1, us-gov-west-1, us-west-1,
- // us-west-2): 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7
- //
- // * Version 11.2 (available in all AWS regions except ap-south-1, ap-northeast-2):
- // 11.2.0.3.v1 | 11.2.0.3.v2 | 11.2.0.3.v3
- //
- // * Version 11.2 (only available in AWS regions ap-northeast-1, ap-southeast-1,
- // ap-southeast-2, eu-central-1, eu-west-1, sa-east-1, us-east-1, us-west-1,
- // us-west-2): 11.2.0.3.v4
- //
- // * Version 11.2 (available in all AWS regions): 11.2.0.4.v1 | 11.2.0.4.v3
- // | 11.2.0.4.v4
- //
- // * Version 11.2 (available in all AWS regions except us-gov-west-1): 11.2.0.4.v5
- // | 11.2.0.4.v6 | 11.2.0.4.v7 | 11.2.0.4.v8
- //
- // Oracle Database Standard Edition (oracle-se)
- //
- // * Version 12.1 (available in all AWS regions except ap-south-1, ap-northeast-2):
- // 12.1.0.1.v1 | 12.1.0.1.v2
- //
- // * Version 12.1 (only available in AWS regions ap-northeast-1, ap-southeast-1,
- // ap-southeast-2, eu-central-1, eu-west-1, sa-east-1, us-east-1, us-west-1,
- // us-west-2): 12.1.0.1.v3 | 12.1.0.1.v4 | 12.1.0.1.v5
- //
- // * Version 11.2 (only available in AWS regions ap-northeast-1, ap-southeast-1,
- // ap-southeast-2, eu-west-1, sa-east-1, us-east-1, us-gov-west-1, us-west-1,
- // us-west-2): 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7
- //
- // * Version 11.2 (available in all AWS regions except ap-south-1, ap-northeast-2):
- // 11.2.0.3.v1 | 11.2.0.3.v2 | 11.2.0.3.v3
- //
- // * Version 11.2 (only available in AWS regions ap-northeast-1, ap-southeast-1,
- // ap-southeast-2, eu-central-1, eu-west-1, sa-east-1, us-east-1, us-west-1,
- // us-west-2): 11.2.0.3.v4
- //
- // * Version 11.2 (available in all AWS regions): 11.2.0.4.v1 | 11.2.0.4.v3
- // | 11.2.0.4.v4
- //
- // * Version 11.2 (available in all AWS regions except us-gov-west-1): 11.2.0.4.v5
- // | 11.2.0.4.v6 | 11.2.0.4.v7 | 11.2.0.4.v8
- //
- // Oracle Database Standard Edition One (oracle-se1)
- //
- // * Version 12.1 (available in all AWS regions except ap-south-1, ap-northeast-2):
- // 12.1.0.1.v1 | 12.1.0.1.v2
- //
- // * Version 12.1 (only available in AWS regions ap-northeast-1, ap-southeast-1,
- // ap-southeast-2, eu-central-1, eu-west-1, sa-east-1, us-east-1, us-west-1,
- // us-west-2): 12.1.0.1.v3 | 12.1.0.1.v4 | 12.1.0.1.v5
- //
- // * Version 11.2 (only available in AWS regions ap-northeast-1, ap-southeast-1,
- // ap-southeast-2, eu-west-1, sa-east-1, us-east-1, us-gov-west-1, us-west-1,
- // us-west-2): 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7
- //
- // * Version 11.2 (available in all AWS regions except ap-south-1, ap-northeast-2):
- // 11.2.0.3.v1 | 11.2.0.3.v2 | 11.2.0.3.v3
- //
- // * Version 11.2 (only available in AWS regions ap-northeast-1, ap-southeast-1,
- // ap-southeast-2, eu-central-1, eu-west-1, sa-east-1, us-east-1, us-west-1,
- // us-west-2): 11.2.0.3.v4
- //
- // * Version 11.2 (available in all AWS regions): 11.2.0.4.v1 | 11.2.0.4.v3
- // | 11.2.0.4.v4
- //
- // * Version 11.2 (available in all AWS regions except us-gov-west-1): 11.2.0.4.v5
- // | 11.2.0.4.v6 | 11.2.0.4.v7 | 11.2.0.4.v8
- //
- // Oracle Database Standard Edition Two (oracle-se2)
- //
- // * Version 12.1 (available in all AWS regions except us-gov-west-1): 12.1.0.2.v2
- // | 12.1.0.2.v3 | 12.1.0.2.v4
- //
- // PostgreSQL
- //
- // * Version 9.6: 9.6.1
- //
- // * Version 9.5:9.5.4 | 9.5.2
- //
- // * Version 9.4: 9.4.9 | 9.4.7 | 9.4.5 | 9.4.4 | 9.4.1
- //
- // * Version 9.3: 9.3.14 | 9.3.12 | 9.3.10 | 9.3.9 | 9.3.6 | 9.3.5 | 9.3.3
- // | 9.3.2 | 9.3.1
- //
// Oracle 12c
//
- // 12.1.0.2.v6 (supported for EE in all AWS regions, and SE2 in all AWS regions
- // except us-gov-west-1)
+ // * 12.1.0.2.v7 (supported for EE in all AWS regions, and SE2 in all AWS
+ // regions except us-gov-west-1)
+ //
+ // * 12.1.0.2.v6 (supported for EE in all AWS regions, and SE2 in all AWS
+ // regions except us-gov-west-1)
//
// * 12.1.0.2.v5 (supported for EE in all AWS regions, and SE2 in all AWS
// regions except us-gov-west-1)
@@ -11856,26 +11817,10 @@ type CreateDBInstanceInput struct {
// * 12.1.0.2.v1 (supported for EE in all AWS regions, and SE2 in all AWS
// regions except us-gov-west-1)
//
- // * 12.1.0.1.v6 (supported for EE, SE1, and SE, in all AWS regions except
- // ap-south-1, ap-northeast-2)
- //
- // * 12.1.0.1.v5 (supported for EE, SE1, and SE, in all AWS regions except
- // ap-south-1, ap-northeast-2)
- //
- // * 12.1.0.1.v4 (supported for EE, SE1, and SE, in all AWS regions except
- // ap-south-1, ap-northeast-2)
- //
- // * 12.1.0.1.v3 (supported for EE, SE1, and SE, in all AWS regions except
- // ap-south-1, ap-northeast-2)
- //
- // * 12.1.0.1.v2 (supported for EE, SE1, and SE, in all AWS regions except
- // ap-south-1, ap-northeast-2)
- //
- // * 12.1.0.1.v1 (supported for EE, SE1, and SE, in all AWS regions except
- // ap-south-1, ap-northeast-2)
- //
// Oracle 11g
//
+ // * 11.2.0.4.v11 (supported for EE, SE1, and SE, in all AWS regions)
+ //
// * 11.2.0.4.v10 (supported for EE, SE1, and SE, in all AWS regions)
//
// * 11.2.0.4.v9 (supported for EE, SE1, and SE, in all AWS regions)
@@ -11896,43 +11841,14 @@ type CreateDBInstanceInput struct {
//
// PostgreSQL
//
- // * Version 9.5 (available in these AWS regions: ap-northeast-1, ap-northeast-2,
- // ap-south-1, ap-southeast-1, ap-southeast-2, eu-central-1, eu-west-1, sa-east-1,
- // us-east-1, us-west-1, us-west-2): * 9.5.4
+ // * Version 9.6: 9.6.1
//
- // * Version 9.5 (available in these AWS regions: ap-northeast-1, ap-northeast-2,
- // ap-south-1, ap-southeast-1, ap-southeast-2, eu-central-1, eu-west-1, sa-east-1,
- // us-east-1, us-east-2, us-west-1, us-west-2): * 9.5.2
+ // * Version 9.5: 9.5.4 | 9.5.2
//
- // * Version 9.4 (available in these AWS regions: ap-northeast-1, ap-northeast-2,
- // ap-south-1, ap-southeast-1, ap-southeast-2, eu-central-1, eu-west-1, sa-east-1,
- // us-east-1, us-west-1, us-west-2): * 9.4.9
+ // * Version 9.4: 9.4.9 | 9.4.7 | 9.4.5 | 9.4.4 | 9.4.1
//
- // * Version 9.4 (available in these AWS regions: ap-northeast-1, ap-northeast-2,
- // ap-south-1, ap-southeast-1, ap-southeast-2, eu-central-1, eu-west-1, sa-east-1,
- // us-east-1, us-east-2, us-west-1, us-west-2): * 9.4.7
- //
- // * Version 9.4 (available in all AWS regions): * 9.4.5
- //
- // * Version 9.4 (available in these AWS regions: ap-northeast-1, ap-northeast-2,
- // ap-southeast-1, ap-southeast-2, eu-central-1, eu-west-1, sa-east-1, us-east-1,
- // us-gov-west-1, us-west-1, us-west-2): * 9.4.4
- //
- // * Version 9.4 (available in these AWS regions: ap-northeast-1, ap-northeast-2,
- // ap-southeast-1, ap-southeast-2, eu-central-1, eu-west-1, sa-east-1, us-east-1,
- // us-east-2, us-gov-west-1, us-west-1, us-west-2): * 9.4.1
- //
- // * Version 9.3 (available in these AWS regions: ap-northeast-1, ap-southeast-1,
- // ap-southeast-2, eu-central-1, eu-west-1, sa-east-1, us-east-1, us-gov-west-1,
- // us-west-1, us-west-2): * 9.3.10 | 9.3.3 | 9.3.5 | 9.3.6 | 9.3.9
- //
- // * Version 9.3 (available in these AWS regions: ap-northeast-1, ap-southeast-1,
- // ap-southeast-2, eu-west-1, sa-east-1, us-east-1, us-gov-west-1, us-west-1,
- // us-west-2): * 9.3.1 | 9.3.2
- //
- // * Version 9.3 (available in these AWS regions: ap-northeast-1, ap-southeast-1,
- // ap-southeast-2, eu-central-1, eu-west-1, sa-east-1, us-east-1, us-west-1,
- // us-west-2): * 9.3.12 | 9.3.14
+ // * Version 9.3: 9.3.14 | 9.3.12 | 9.3.10 | 9.3.9 | 9.3.6 | 9.3.5 | 9.3.3
+ // | 9.3.2 | 9.3.1
EngineVersion *string `type:"string"`
// The amount of Provisioned IOPS (input/output operations per second) to be
@@ -11965,13 +11881,20 @@ type CreateDBInstanceInput struct {
// The password for the master database user. Can be any printable ASCII character
// except "/", """, or "@".
//
- // Type: String
+ // Amazon Aurora
//
- // MySQL
+ // Not applicable. You specify the password for the master database user when
+ // you create your DB cluster.
+ //
+ // MariaDB
//
// Constraints: Must contain from 8 to 41 characters.
//
- // MariaDB
+ // Microsoft SQL Server
+ //
+ // Constraints: Must contain from 8 to 128 characters.
+ //
+ // MySQL
//
// Constraints: Must contain from 8 to 41 characters.
//
@@ -11979,20 +11902,35 @@ type CreateDBInstanceInput struct {
//
// Constraints: Must contain from 8 to 30 characters.
//
- // SQL Server
- //
- // Constraints: Must contain from 8 to 128 characters.
- //
// PostgreSQL
//
// Constraints: Must contain from 8 to 128 characters.
+ MasterUserPassword *string `type:"string"`
+
+ // The name for the master database user.
//
// Amazon Aurora
//
- // Constraints: Must contain from 8 to 41 characters.
- MasterUserPassword *string `type:"string"`
-
- // The name of master user for the client DB instance.
+ // Not applicable. You specify the name for the master database user when you
+ // create your DB cluster.
+ //
+ // MariaDB
+ //
+ // Constraints:
+ //
+ // * Must be 1 to 16 alphanumeric characters.
+ //
+ // * Cannot be a reserved word for the chosen database engine.
+ //
+ // Microsoft SQL Server
+ //
+ // Constraints:
+ //
+ // * Must be 1 to 128 alphanumeric characters.
+ //
+ // * First character must be a letter.
+ //
+ // * Cannot be a reserved word for the chosen database engine.
//
// MySQL
//
@@ -12004,16 +11942,6 @@ type CreateDBInstanceInput struct {
//
// * Cannot be a reserved word for the chosen database engine.
//
- // MariaDB
- //
- // Constraints:
- //
- // * Must be 1 to 16 alphanumeric characters.
- //
- // * Cannot be a reserved word for the chosen database engine.
- //
- // Type: String
- //
// Oracle
//
// Constraints:
@@ -12024,16 +11952,6 @@ type CreateDBInstanceInput struct {
//
// * Cannot be a reserved word for the chosen database engine.
//
- // SQL Server
- //
- // Constraints:
- //
- // * Must be 1 to 128 alphanumeric characters.
- //
- // * First character must be a letter.
- //
- // * Cannot be a reserved word for the chosen database engine.
- //
// PostgreSQL
//
// Constraints:
@@ -12057,8 +11975,8 @@ type CreateDBInstanceInput struct {
// The ARN for the IAM role that permits RDS to send enhanced monitoring metrics
// to CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess.
- // For information on creating a monitoring role, go to To create an IAM role
- // for Amazon RDS Enhanced Monitoring (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.html#USER_Monitoring.OS.IAMRole).
+ // For information on creating a monitoring role, go to Setting Up and Enabling
+ // Enhanced Monitoring (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling).
//
// If MonitoringInterval is set to a value other than 0, then you must supply
// a MonitoringRoleArn value.
@@ -12130,8 +12048,7 @@ type CreateDBInstanceInput struct {
//
// Default: A 30-minute window selected at random from an 8-hour block of time
// per region. To see the time blocks available, see Adjusting the Preferred
- // Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html)
- // in the Amazon RDS User Guide.
+ // DB Instance Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow).
//
// Constraints:
//
@@ -12340,6 +12257,12 @@ func (s *CreateDBInstanceInput) SetDomainIAMRoleName(v string) *CreateDBInstance
return s
}
+// SetEnableIAMDatabaseAuthentication sets the EnableIAMDatabaseAuthentication field's value.
+func (s *CreateDBInstanceInput) SetEnableIAMDatabaseAuthentication(v bool) *CreateDBInstanceInput {
+ s.EnableIAMDatabaseAuthentication = &v
+ return s
+}
+
// SetEngine sets the Engine field's value.
func (s *CreateDBInstanceInput) SetEngine(v string) *CreateDBInstanceInput {
s.Engine = &v
@@ -12580,6 +12503,20 @@ type CreateDBInstanceReadReplicaInput struct {
// DestinationRegion is used for presigning the request to a given region.
DestinationRegion *string `type:"string"`
+ // True to enable mapping of AWS Identity and Access Management (IAM) accounts
+ // to database accounts; otherwise false.
+ //
+ // You can enable IAM database authentication for the following database engines:
+ //
+ // * For MySQL 5.6, minor version 5.6.34 or higher
+ //
+ // * For MySQL 5.7, minor version 5.7.16 or higher
+ //
+ // * Aurora 5.6 or higher.
+ //
+ // Default: false
+ EnableIAMDatabaseAuthentication *bool `type:"boolean"`
+
// The amount of Provisioned IOPS (input/output operations per second) to be
// initially allocated for the DB instance.
Iops *int64 `type:"integer"`
@@ -12800,6 +12737,12 @@ func (s *CreateDBInstanceReadReplicaInput) SetDestinationRegion(v string) *Creat
return s
}
+// SetEnableIAMDatabaseAuthentication sets the EnableIAMDatabaseAuthentication field's value.
+func (s *CreateDBInstanceReadReplicaInput) SetEnableIAMDatabaseAuthentication(v bool) *CreateDBInstanceReadReplicaInput {
+ s.EnableIAMDatabaseAuthentication = &v
+ return s
+}
+
// SetIops sets the Iops field's value.
func (s *CreateDBInstanceReadReplicaInput) SetIops(v int64) *CreateDBInstanceReadReplicaInput {
s.Iops = &v
@@ -13659,7 +13602,10 @@ func (s *CreateOptionGroupOutput) SetOptionGroup(v *OptionGroup) *CreateOptionGr
type DBCluster struct {
_ struct{} `type:"structure"`
- // Specifies the allocated storage size in gigabytes (GB).
+ // For all database engines except Amazon Aurora, AllocatedStorage specifies
+ // the allocated storage size in gigabytes (GB). For Aurora, AllocatedStorage
+ // always returns 1, because Aurora DB cluster storage size is not fixed, but
+ // instead automatically adjusts as needed.
AllocatedStorage *int64 `type:"integer"`
// Provides a list of the AWS Identity and Access Management (IAM) roles that
@@ -13729,6 +13675,10 @@ type DBCluster struct {
// Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.
HostedZoneId *string `type:"string"`
+ // True if mapping of AWS Identity and Access Management (IAM) accounts to database
+ // accounts is enabled; otherwise false.
+ IAMDatabaseAuthenticationEnabled *bool `type:"boolean"`
+
// If StorageEncrypted is true, the KMS key identifier for the encrypted DB
// cluster.
KmsKeyId *string `type:"string"`
@@ -13771,7 +13721,7 @@ type DBCluster struct {
// If a failover occurs, and the Aurora Replica that you are connected to is
// promoted to be the primary instance, your connection will be dropped. To
// continue sending your read workload to other Aurora Replicas in the cluster,
- // you can then recoonect to the reader endpoint.
+ // you can then reconnect to the reader endpoint.
ReaderEndpoint *string `type:"string"`
// Contains the identifier of the source DB cluster if this DB cluster is a
@@ -13912,6 +13862,12 @@ func (s *DBCluster) SetHostedZoneId(v string) *DBCluster {
return s
}
+// SetIAMDatabaseAuthenticationEnabled sets the IAMDatabaseAuthenticationEnabled field's value.
+func (s *DBCluster) SetIAMDatabaseAuthenticationEnabled(v bool) *DBCluster {
+ s.IAMDatabaseAuthenticationEnabled = &v
+ return s
+}
+
// SetKmsKeyId sets the KmsKeyId field's value.
func (s *DBCluster) SetKmsKeyId(v string) *DBCluster {
s.KmsKeyId = &v
@@ -14264,6 +14220,10 @@ type DBClusterSnapshot struct {
// Provides the version of the database engine for this DB cluster snapshot.
EngineVersion *string `type:"string"`
+ // True if mapping of AWS Identity and Access Management (IAM) accounts to database
+ // accounts is enabled; otherwise false.
+ IAMDatabaseAuthenticationEnabled *bool `type:"boolean"`
+
// If StorageEncrypted is true, the KMS key identifier for the encrypted DB
// cluster snapshot.
KmsKeyId *string `type:"string"`
@@ -14356,6 +14316,12 @@ func (s *DBClusterSnapshot) SetEngineVersion(v string) *DBClusterSnapshot {
return s
}
+// SetIAMDatabaseAuthenticationEnabled sets the IAMDatabaseAuthenticationEnabled field's value.
+func (s *DBClusterSnapshot) SetIAMDatabaseAuthenticationEnabled(v bool) *DBClusterSnapshot {
+ s.IAMDatabaseAuthenticationEnabled = &v
+ return s
+}
+
// SetKmsKeyId sets the KmsKeyId field's value.
func (s *DBClusterSnapshot) SetKmsKeyId(v string) *DBClusterSnapshot {
s.KmsKeyId = &v
@@ -14663,7 +14629,7 @@ type DBInstance struct {
// when returning values from CreateDBInstanceReadReplica since Read Replicas
// are only supported for these engines.
//
- // MySQL, MariaDB, SQL Server, PostgreSQL, Amazon Aurora
+ // MySQL, MariaDB, SQL Server, PostgreSQL
//
// Contains the name of the initial database of this instance that was provided
// at create time, if one was specified when the DB instance was created. This
@@ -14713,6 +14679,19 @@ type DBInstance struct {
// receives the Enhanced Monitoring metrics data for the DB instance.
EnhancedMonitoringResourceArn *string `type:"string"`
+ // True if mapping of AWS Identity and Access Management (IAM) accounts to database
+ // accounts is enabled; otherwise false.
+ //
+ // IAM database authentication can be enabled for the following database engines:
+ //
+ // * For MySQL 5.6, minor version 5.6.34 or higher
+ //
+ // * For MySQL 5.7, minor version 5.7.16 or higher
+ //
+ // * Aurora 5.6 or higher. To enable IAM database authentication for Aurora,
+ // see DBCluster Type.
+ IAMDatabaseAuthenticationEnabled *bool `type:"boolean"`
+
// Provides the date and time the DB instance was created.
InstanceCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
@@ -14970,6 +14949,12 @@ func (s *DBInstance) SetEnhancedMonitoringResourceArn(v string) *DBInstance {
return s
}
+// SetIAMDatabaseAuthenticationEnabled sets the IAMDatabaseAuthenticationEnabled field's value.
+func (s *DBInstance) SetIAMDatabaseAuthenticationEnabled(v bool) *DBInstance {
+ s.IAMDatabaseAuthenticationEnabled = &v
+ return s
+}
+
// SetInstanceCreateTime sets the InstanceCreateTime field's value.
func (s *DBInstance) SetInstanceCreateTime(v time.Time) *DBInstance {
s.InstanceCreateTime = &v
@@ -15475,6 +15460,10 @@ type DBSnapshot struct {
// Specifies the version of the database engine.
EngineVersion *string `type:"string"`
+ // True if mapping of AWS Identity and Access Management (IAM) accounts to database
+ // accounts is enabled; otherwise false.
+ IAMDatabaseAuthenticationEnabled *bool `type:"boolean"`
+
// Specifies the time when the snapshot was taken, in Universal Coordinated
// Time (UTC).
InstanceCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
@@ -15592,6 +15581,12 @@ func (s *DBSnapshot) SetEngineVersion(v string) *DBSnapshot {
return s
}
+// SetIAMDatabaseAuthenticationEnabled sets the IAMDatabaseAuthenticationEnabled field's value.
+func (s *DBSnapshot) SetIAMDatabaseAuthenticationEnabled(v bool) *DBSnapshot {
+ s.IAMDatabaseAuthenticationEnabled = &v
+ return s
+}
+
// SetInstanceCreateTime sets the InstanceCreateTime field's value.
func (s *DBSnapshot) SetInstanceCreateTime(v time.Time) *DBSnapshot {
s.InstanceCreateTime = &v
@@ -21259,6 +21254,12 @@ type ModifyDBClusterInput struct {
// The name of the DB cluster parameter group to use for the DB cluster.
DBClusterParameterGroupName *string `type:"string"`
+ // A Boolean value that is true to enable mapping of AWS Identity and Access
+ // Management (IAM) accounts to database accounts; otherwise false.
+ //
+ // Default: false
+ EnableIAMDatabaseAuthentication *bool `type:"boolean"`
+
// The new password for the master database user. This password can contain
// any printable ASCII character except "/", """, or "@".
//
@@ -21332,7 +21333,7 @@ type ModifyDBClusterInput struct {
// Constraints: Minimum 30-minute window.
PreferredMaintenanceWindow *string `type:"string"`
- // A lst of VPC security groups that the DB cluster will belong to.
+ // A list of VPC security groups that the DB cluster will belong to.
VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"`
}
@@ -21383,6 +21384,12 @@ func (s *ModifyDBClusterInput) SetDBClusterParameterGroupName(v string) *ModifyD
return s
}
+// SetEnableIAMDatabaseAuthentication sets the EnableIAMDatabaseAuthentication field's value.
+func (s *ModifyDBClusterInput) SetEnableIAMDatabaseAuthentication(v bool) *ModifyDBClusterInput {
+ s.EnableIAMDatabaseAuthentication = &v
+ return s
+}
+
// SetMasterUserPassword sets the MasterUserPassword field's value.
func (s *ModifyDBClusterInput) SetMasterUserPassword(v string) *ModifyDBClusterInput {
s.MasterUserPassword = &v
@@ -21904,6 +21911,18 @@ type ModifyDBInstanceInput struct {
// The name of the IAM role to use when making API calls to the Directory Service.
DomainIAMRoleName *string `type:"string"`
+ // True to enable mapping of AWS Identity and Access Management (IAM) accounts
+ // to database accounts; otherwise false.
+ //
+ // You can enable IAM database authentication for the following database engines:
+ //
+ // * For MySQL 5.6, minor version 5.6.34 or higher
+ //
+ // * For MySQL 5.7, minor version 5.7.16 or higher
+ //
+ // Default: false
+ EnableIAMDatabaseAuthentication *bool `type:"boolean"`
+
// The version number of the database engine to upgrade to. Changing this parameter
// results in an outage and the change is applied during the next maintenance
// window unless the ApplyImmediately parameter is set to true for this request.
@@ -22230,6 +22249,12 @@ func (s *ModifyDBInstanceInput) SetDomainIAMRoleName(v string) *ModifyDBInstance
return s
}
+// SetEnableIAMDatabaseAuthentication sets the EnableIAMDatabaseAuthentication field's value.
+func (s *ModifyDBInstanceInput) SetEnableIAMDatabaseAuthentication(v bool) *ModifyDBInstanceInput {
+ s.EnableIAMDatabaseAuthentication = &v
+ return s
+}
+
// SetEngineVersion sets the EngineVersion field's value.
func (s *ModifyDBInstanceInput) SetEngineVersion(v string) *ModifyDBInstanceInput {
s.EngineVersion = &v
@@ -23644,6 +23669,9 @@ type OrderableDBInstanceOption struct {
// from 1 to 60 seconds.
SupportsEnhancedMonitoring *bool `type:"boolean"`
+ // Indicates whether this orderable DB instance supports IAM database authentication.
+ SupportsIAMDatabaseAuthentication *bool `type:"boolean"`
+
// Indicates whether this orderable DB instance supports provisioned IOPS.
SupportsIops *bool `type:"boolean"`
@@ -23718,6 +23746,12 @@ func (s *OrderableDBInstanceOption) SetSupportsEnhancedMonitoring(v bool) *Order
return s
}
+// SetSupportsIAMDatabaseAuthentication sets the SupportsIAMDatabaseAuthentication field's value.
+func (s *OrderableDBInstanceOption) SetSupportsIAMDatabaseAuthentication(v bool) *OrderableDBInstanceOption {
+ s.SupportsIAMDatabaseAuthentication = &v
+ return s
+}
+
// SetSupportsIops sets the SupportsIops field's value.
func (s *OrderableDBInstanceOption) SetSupportsIops(v bool) *OrderableDBInstanceOption {
s.SupportsIops = &v
@@ -24508,7 +24542,7 @@ func (s *RecurringCharge) SetRecurringChargeFrequency(v string) *RecurringCharge
type RemoveRoleFromDBClusterInput struct {
_ struct{} `type:"structure"`
- // The name of the DB cluster to disassociate the IAM role rom.
+ // The name of the DB cluster to disassociate the IAM role from.
//
// DBClusterIdentifier is a required field
DBClusterIdentifier *string `type:"string" required:"true"`
@@ -25060,10 +25094,10 @@ type ResetDBParameterGroupInput struct {
// DBParameterGroupName is a required field
DBParameterGroupName *string `type:"string" required:"true"`
- // An array of parameter names, values, and the apply method for the parameter
- // update. At least one parameter name, value, and apply method must be supplied;
- // subsequent arguments are optional. A maximum of 20 parameters can be modified
- // in a single request.
+ // To reset the entire DB parameter group, specify the DBParameterGroup name
+ // and ResetAllParameters parameters. To reset specific parameters, provide
+ // a list of the following: ParameterName and ApplyMethod. A maximum of 20 parameters
+ // can be modified in a single request.
//
// MySQL
//
@@ -25230,6 +25264,12 @@ type RestoreDBClusterFromS3Input struct {
// The database name for the restored DB cluster.
DatabaseName *string `type:"string"`
+ // A Boolean value that is true to enable mapping of AWS Identity and Access
+ // Management (IAM) accounts to database accounts; otherwise false.
+ //
+ // Default: false
+ EnableIAMDatabaseAuthentication *bool `type:"boolean"`
+
// The name of the database engine to be used for the restored DB cluster.
//
// Valid Values: aurora
@@ -25457,6 +25497,12 @@ func (s *RestoreDBClusterFromS3Input) SetDatabaseName(v string) *RestoreDBCluste
return s
}
+// SetEnableIAMDatabaseAuthentication sets the EnableIAMDatabaseAuthentication field's value.
+func (s *RestoreDBClusterFromS3Input) SetEnableIAMDatabaseAuthentication(v bool) *RestoreDBClusterFromS3Input {
+ s.EnableIAMDatabaseAuthentication = &v
+ return s
+}
+
// SetEngine sets the Engine field's value.
func (s *RestoreDBClusterFromS3Input) SetEngine(v string) *RestoreDBClusterFromS3Input {
s.Engine = &v
@@ -25632,6 +25678,12 @@ type RestoreDBClusterFromSnapshotInput struct {
// The database name for the restored DB cluster.
DatabaseName *string `type:"string"`
+ // A Boolean value that is true to enable mapping of AWS Identity and Access
+ // Management (IAM) accounts to database accounts; otherwise false.
+ //
+ // Default: false
+ EnableIAMDatabaseAuthentication *bool `type:"boolean"`
+
// The database engine to use for the new DB cluster.
//
// Default: The same as source
@@ -25746,6 +25798,12 @@ func (s *RestoreDBClusterFromSnapshotInput) SetDatabaseName(v string) *RestoreDB
return s
}
+// SetEnableIAMDatabaseAuthentication sets the EnableIAMDatabaseAuthentication field's value.
+func (s *RestoreDBClusterFromSnapshotInput) SetEnableIAMDatabaseAuthentication(v bool) *RestoreDBClusterFromSnapshotInput {
+ s.EnableIAMDatabaseAuthentication = &v
+ return s
+}
+
// SetEngine sets the Engine field's value.
func (s *RestoreDBClusterFromSnapshotInput) SetEngine(v string) *RestoreDBClusterFromSnapshotInput {
s.Engine = &v
@@ -25857,6 +25915,12 @@ type RestoreDBClusterToPointInTimeInput struct {
// Example: mySubnetgroup
DBSubnetGroupName *string `type:"string"`
+ // A Boolean value that is true to enable mapping of AWS Identity and Access
+ // Management (IAM) accounts to database accounts; otherwise false.
+ //
+ // Default: false
+ EnableIAMDatabaseAuthentication *bool `type:"boolean"`
+
// The KMS key identifier to use when restoring an encrypted DB cluster from
// an encrypted DB cluster.
//
@@ -25974,6 +26038,12 @@ func (s *RestoreDBClusterToPointInTimeInput) SetDBSubnetGroupName(v string) *Res
return s
}
+// SetEnableIAMDatabaseAuthentication sets the EnableIAMDatabaseAuthentication field's value.
+func (s *RestoreDBClusterToPointInTimeInput) SetEnableIAMDatabaseAuthentication(v bool) *RestoreDBClusterToPointInTimeInput {
+ s.EnableIAMDatabaseAuthentication = &v
+ return s
+}
+
// SetKmsKeyId sets the KmsKeyId field's value.
func (s *RestoreDBClusterToPointInTimeInput) SetKmsKeyId(v string) *RestoreDBClusterToPointInTimeInput {
s.KmsKeyId = &v
@@ -26144,6 +26214,20 @@ type RestoreDBInstanceFromDBSnapshotInput struct {
// Directory Service.
DomainIAMRoleName *string `type:"string"`
+ // True to enable mapping of AWS Identity and Access Management (IAM) accounts
+ // to database accounts; otherwise false.
+ //
+ // You can enable IAM database authentication for the following database engines:
+ //
+ // * For MySQL 5.6, minor version 5.6.34 or higher
+ //
+ // * For MySQL 5.7, minor version 5.7.16 or higher
+ //
+ // * Aurora 5.6 or higher.
+ //
+ // Default: false
+ EnableIAMDatabaseAuthentication *bool `type:"boolean"`
+
// The database engine to use for the new instance.
//
// Default: The same as source
@@ -26320,6 +26404,12 @@ func (s *RestoreDBInstanceFromDBSnapshotInput) SetDomainIAMRoleName(v string) *R
return s
}
+// SetEnableIAMDatabaseAuthentication sets the EnableIAMDatabaseAuthentication field's value.
+func (s *RestoreDBInstanceFromDBSnapshotInput) SetEnableIAMDatabaseAuthentication(v bool) *RestoreDBInstanceFromDBSnapshotInput {
+ s.EnableIAMDatabaseAuthentication = &v
+ return s
+}
+
// SetEngine sets the Engine field's value.
func (s *RestoreDBInstanceFromDBSnapshotInput) SetEngine(v string) *RestoreDBInstanceFromDBSnapshotInput {
s.Engine = &v
@@ -26471,6 +26561,20 @@ type RestoreDBInstanceToPointInTimeInput struct {
// Directory Service.
DomainIAMRoleName *string `type:"string"`
+ // True to enable mapping of AWS Identity and Access Management (IAM) accounts
+ // to database accounts; otherwise false.
+ //
+ // You can enable IAM database authentication for the following database engines:
+ //
+ // * For MySQL 5.6, minor version 5.6.34 or higher
+ //
+ // * For MySQL 5.7, minor version 5.7.16 or higher
+ //
+ // * Aurora 5.6 or higher.
+ //
+ // Default: false
+ EnableIAMDatabaseAuthentication *bool `type:"boolean"`
+
// The database engine to use for the new instance.
//
// Default: The same as source
@@ -26679,6 +26783,12 @@ func (s *RestoreDBInstanceToPointInTimeInput) SetDomainIAMRoleName(v string) *Re
return s
}
+// SetEnableIAMDatabaseAuthentication sets the EnableIAMDatabaseAuthentication field's value.
+func (s *RestoreDBInstanceToPointInTimeInput) SetEnableIAMDatabaseAuthentication(v bool) *RestoreDBInstanceToPointInTimeInput {
+ s.EnableIAMDatabaseAuthentication = &v
+ return s
+}
+
// SetEngine sets the Engine field's value.
func (s *RestoreDBInstanceToPointInTimeInput) SetEngine(v string) *RestoreDBInstanceToPointInTimeInput {
s.Engine = &v
diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/errors.go b/vendor/github.com/aws/aws-sdk-go/service/rds/errors.go
index 2617719ff..4cb1983df 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/rds/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/rds/errors.go
@@ -272,7 +272,9 @@ const (
// ErrCodeInvalidDBParameterGroupStateFault for service response error code
// "InvalidDBParameterGroupState".
//
- // The DB parameter group cannot be deleted because it is in use.
+ // The DB parameter group is in use or is in an invalid state. If you are attempting
+ // to delete the parameter group, you cannot delete it when the parameter group
+ // is in this state.
ErrCodeInvalidDBParameterGroupStateFault = "InvalidDBParameterGroupState"
// ErrCodeInvalidDBSecurityGroupStateFault for service response error code
diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/service.go b/vendor/github.com/aws/aws-sdk-go/service/rds/service.go
index 80fa5120a..0c02c6217 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/rds/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/rds/service.go
@@ -14,9 +14,9 @@ import (
//
// Amazon Relational Database Service (Amazon RDS) is a web service that makes
// it easier to set up, operate, and scale a relational database in the cloud.
-// It provides cost-efficient, resizeable capacity for an industry-standard
-// relational database and manages common database administration tasks, freeing
-// up developers to focus on what makes their applications and businesses unique.
+// It provides cost-efficient, resizable capacity for an industry-standard relational
+// database and manages common database administration tasks, freeing up developers
+// to focus on what makes their applications and businesses unique.
//
// Amazon RDS gives you access to the capabilities of a MySQL, MariaDB, PostgreSQL,
// Microsoft SQL Server, Oracle, or Amazon Aurora database server. These capabilities
diff --git a/vendor/github.com/joyent/triton-go/accounts.go b/vendor/github.com/joyent/triton-go/accounts.go
index 8049d4e7e..474cce000 100644
--- a/vendor/github.com/joyent/triton-go/accounts.go
+++ b/vendor/github.com/joyent/triton-go/accounts.go
@@ -5,8 +5,9 @@ import (
"net/http"
"time"
- "github.com/hashicorp/errwrap"
"fmt"
+
+ "github.com/hashicorp/errwrap"
)
type AccountsClient struct {
@@ -40,7 +41,8 @@ type Account struct {
type GetAccountInput struct{}
func (client *AccountsClient) GetAccount(input *GetAccountInput) (*Account, error) {
- respReader, err := client.executeRequest(http.MethodGet, "/my", nil)
+ path := fmt.Sprintf("/%s", client.accountName)
+ respReader, err := client.executeRequest(http.MethodGet, path, nil)
if respReader != nil {
defer respReader.Close()
}
@@ -58,17 +60,17 @@ func (client *AccountsClient) GetAccount(input *GetAccountInput) (*Account, erro
}
type UpdateAccountInput struct {
- Email string `json:"email,omitempty"`
- CompanyName string `json:"companyName,omitempty"`
- FirstName string `json:"firstName,omitempty"`
- LastName string `json:"lastName,omitempty"`
- Address string `json:"address,omitempty"`
- PostalCode string `json:"postalCode,omitempty"`
- City string `json:"city,omitempty"`
- State string `json:"state,omitempty"`
- Country string `json:"country,omitempty"`
- Phone string `json:"phone,omitempty"`
- TritonCNSEnabled bool `json:"triton_cns_enabled,omitempty"`
+ Email string `json:"email,omitempty"`
+ CompanyName string `json:"companyName,omitempty"`
+ FirstName string `json:"firstName,omitempty"`
+ LastName string `json:"lastName,omitempty"`
+ Address string `json:"address,omitempty"`
+ PostalCode string `json:"postalCode,omitempty"`
+ City string `json:"city,omitempty"`
+ State string `json:"state,omitempty"`
+ Country string `json:"country,omitempty"`
+ Phone string `json:"phone,omitempty"`
+ TritonCNSEnabled bool `json:"triton_cns_enabled,omitempty"`
}
// UpdateAccount updates your account details with the given parameters.
diff --git a/vendor/github.com/joyent/triton-go/client.go b/vendor/github.com/joyent/triton-go/client.go
index e34c6b1fe..5ee1f0be7 100644
--- a/vendor/github.com/joyent/triton-go/client.go
+++ b/vendor/github.com/joyent/triton-go/client.go
@@ -2,6 +2,7 @@ package triton
import (
"bytes"
+ "crypto/tls"
"encoding/json"
"fmt"
"io"
@@ -45,16 +46,7 @@ func NewClient(endpoint string, accountName string, signers ...authentication.Si
}
httpClient := &http.Client{
- Transport: &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- }).Dial,
- TLSHandshakeTimeout: 10 * time.Second,
- DisableKeepAlives: true,
- MaxIdleConnsPerHost: -1,
- },
+ Transport: httpTransport(false),
CheckRedirect: doNotFollowRedirects,
}
@@ -75,6 +67,34 @@ func NewClient(endpoint string, accountName string, signers ...authentication.Si
}, nil
}
+// InsecureSkipTLSVerify turns off TLS verification for the client connection. This
+// allows connection to an endpoint with a certificate which was signed by a non-
+// trusted CA, such as self-signed certificates. This can be useful when connecting
+// to temporary Triton installations such as Triton Cloud-On-A-Laptop.
+func (c *Client) InsecureSkipTLSVerify() {
+ if c.client == nil {
+ return
+ }
+
+ c.client.HTTPClient.Transport = httpTransport(true)
+}
+
+func httpTransport(insecureSkipTLSVerify bool) *http.Transport {
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+ DisableKeepAlives: true,
+ MaxIdleConnsPerHost: -1,
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: insecureSkipTLSVerify,
+ },
+ }
+}
+
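Since NewClient's signature appears just above, a minimal sketch of the intended call pattern, assuming an SSH-agent signer configured elsewhere; the key fingerprint, endpoint URL, and account name are placeholders:

package main

import (
    "log"

    triton "github.com/joyent/triton-go"
    "github.com/joyent/triton-go/authentication"
)

func main() {
    signer, err := authentication.NewSSHAgentSigner("<key-fingerprint>", "my-account")
    if err != nil {
        log.Fatal(err)
    }
    client, err := triton.NewClient("https://cloapi.example.local", "my-account", signer)
    if err != nil {
        log.Fatal(err)
    }
    // Opt out of certificate verification only for endpoints presenting
    // self-signed certificates, such as a Cloud-On-A-Laptop install; this
    // swaps in the transport built by httpTransport(true) above.
    client.InsecureSkipTLSVerify()
}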
func doNotFollowRedirects(*http.Request, []*http.Request) error {
return http.ErrUseLastResponse
}
diff --git a/vendor/github.com/joyent/triton-go/datacenters.go b/vendor/github.com/joyent/triton-go/datacenters.go
index c90cc954a..72ccbd6d9 100644
--- a/vendor/github.com/joyent/triton-go/datacenters.go
+++ b/vendor/github.com/joyent/triton-go/datacenters.go
@@ -28,7 +28,8 @@ type DataCenter struct {
type ListDataCentersInput struct{}
func (client *DataCentersClient) ListDataCenters(*ListDataCentersInput) ([]*DataCenter, error) {
- respReader, err := client.executeRequest(http.MethodGet, "/my/datacenters", nil)
+ path := fmt.Sprintf("/%s/datacenters", client.accountName)
+ respReader, err := client.executeRequest(http.MethodGet, path, nil)
if respReader != nil {
defer respReader.Close()
}
@@ -68,7 +69,8 @@ type GetDataCenterInput struct {
}
func (client *DataCentersClient) GetDataCenter(input *GetDataCenterInput) (*DataCenter, error) {
- resp, err := client.executeRequestRaw(http.MethodGet, fmt.Sprintf("/my/datacenters/%s", input.Name), nil)
+ path := fmt.Sprintf("/%s/datacenters/%s", client.accountName, input.Name)
+ resp, err := client.executeRequestRaw(http.MethodGet, path, nil)
if err != nil {
return nil, errwrap.Wrapf("Error executing GetDatacenter request: {{err}}", err)
}
diff --git a/vendor/github.com/joyent/triton-go/fabrics.go b/vendor/github.com/joyent/triton-go/fabrics.go
index 5404d10bb..5b1caa521 100644
--- a/vendor/github.com/joyent/triton-go/fabrics.go
+++ b/vendor/github.com/joyent/triton-go/fabrics.go
@@ -27,7 +27,8 @@ type FabricVLAN struct {
type ListFabricVLANsInput struct{}
func (client *FabricsClient) ListFabricVLANs(*ListFabricVLANsInput) ([]*FabricVLAN, error) {
- respReader, err := client.executeRequest(http.MethodGet, "/my/fabrics/default/vlans", nil)
+ path := fmt.Sprintf("/%s/fabrics/default/vlans", client.accountName)
+ respReader, err := client.executeRequest(http.MethodGet, path, nil)
if respReader != nil {
defer respReader.Close()
}
diff --git a/vendor/github.com/joyent/triton-go/firewall.go b/vendor/github.com/joyent/triton-go/firewall.go
index c91d012d9..e9d57a80e 100644
--- a/vendor/github.com/joyent/triton-go/firewall.go
+++ b/vendor/github.com/joyent/triton-go/firewall.go
@@ -39,7 +39,8 @@ type FirewallRule struct {
type ListFirewallRulesInput struct{}
func (client *FirewallClient) ListFirewallRules(*ListFirewallRulesInput) ([]*FirewallRule, error) {
- respReader, err := client.executeRequest(http.MethodGet, "/my/fwrules", nil)
+ path := fmt.Sprintf("/%s/fwrules", client.accountName)
+ respReader, err := client.executeRequest(http.MethodGet, path, nil)
if respReader != nil {
defer respReader.Close()
}
@@ -194,7 +195,8 @@ type ListMachineFirewallRulesInput struct {
}
func (client *FirewallClient) ListMachineFirewallRules(input *ListMachineFirewallRulesInput) ([]*FirewallRule, error) {
- respReader, err := client.executeRequest(http.MethodGet, fmt.Sprintf("/my/machines/%s/firewallrules", input.MachineID), nil)
+ path := fmt.Sprintf("/%s/machines/%s/firewallrules", client.accountName, input.MachineID)
+ respReader, err := client.executeRequest(http.MethodGet, path, nil)
if respReader != nil {
defer respReader.Close()
}
diff --git a/vendor/github.com/joyent/triton-go/images.go b/vendor/github.com/joyent/triton-go/images.go
index 3e0aa8a75..01d6ea96d 100644
--- a/vendor/github.com/joyent/triton-go/images.go
+++ b/vendor/github.com/joyent/triton-go/images.go
@@ -49,7 +49,8 @@ type Image struct {
type ListImagesInput struct{}
func (client *ImagesClient) ListImages(*ListImagesInput) ([]*Image, error) {
- respReader, err := client.executeRequest(http.MethodGet, "/my/images", nil)
+ path := fmt.Sprintf("/%s/images", client.accountName)
+ respReader, err := client.executeRequest(http.MethodGet, path, nil)
if respReader != nil {
defer respReader.Close()
}
@@ -148,7 +149,7 @@ type CreateImageFromMachineInput struct {
HomePage string `json:"homepage,omitempty"`
EULA string `json:"eula,omitempty"`
ACL []string `json:"acl,omitempty"`
- tags map[string]string `json:"tags,omitempty"`
+ Tags map[string]string `json:"tags,omitempty"`
}
func (client *ImagesClient) CreateImageFromMachine(input *CreateImageFromMachineInput) (*Image, error) {
@@ -178,7 +179,7 @@ type UpdateImageInput struct {
HomePage string `json:"homepage,omitempty"`
EULA string `json:"eula,omitempty"`
ACL []string `json:"acl,omitempty"`
- tags map[string]string `json:"tags,omitempty"`
+ Tags map[string]string `json:"tags,omitempty"`
}
func (client *ImagesClient) UpdateImage(input *UpdateImageInput) (*Image, error) {
diff --git a/vendor/github.com/joyent/triton-go/keys.go b/vendor/github.com/joyent/triton-go/keys.go
index a4f394a21..e63302349 100644
--- a/vendor/github.com/joyent/triton-go/keys.go
+++ b/vendor/github.com/joyent/triton-go/keys.go
@@ -35,7 +35,8 @@ type ListKeysInput struct{}
// ListKeys lists all public keys we have on record for the specified
// account.
func (client *KeysClient) ListKeys(*ListKeysInput) ([]*Key, error) {
- respReader, err := client.executeRequest(http.MethodGet, "/my/keys", nil)
+ path := fmt.Sprintf("/%s/keys", client.accountName)
+ respReader, err := client.executeRequest(http.MethodGet, path, nil)
if respReader != nil {
defer respReader.Close()
}
diff --git a/vendor/github.com/joyent/triton-go/machines.go b/vendor/github.com/joyent/triton-go/machines.go
index 0fae69b4a..287c706c7 100644
--- a/vendor/github.com/joyent/triton-go/machines.go
+++ b/vendor/github.com/joyent/triton-go/machines.go
@@ -100,7 +100,7 @@ func (client *MachinesClient) GetMachine(input *GetMachineInput) (*Machine, erro
if response != nil {
defer response.Body.Close()
}
- if response.StatusCode == http.StatusNotFound {
+ if response.StatusCode == http.StatusNotFound || response.StatusCode == http.StatusGone {
return nil, &TritonError{
Code: "ResourceNotFound",
}
@@ -219,7 +219,8 @@ func (input *CreateMachineInput) toAPI() map[string]interface{} {
}
func (client *MachinesClient) CreateMachine(input *CreateMachineInput) (*Machine, error) {
- respReader, err := client.executeRequest(http.MethodPost, "/my/machines", input.toAPI())
+ path := fmt.Sprintf("/%s/machines", client.accountName)
+ respReader, err := client.executeRequest(http.MethodPost, path, input.toAPI())
if respReader != nil {
defer respReader.Close()
}
@@ -501,7 +502,8 @@ type ListNICsInput struct {
}
func (client *MachinesClient) ListNICs(input *ListNICsInput) ([]*NIC, error) {
- respReader, err := client.executeRequest(http.MethodGet, fmt.Sprintf("/my/machines/%s/nics", input.MachineID), nil)
+ path := fmt.Sprintf("/%s/machines/%s/nics", client.accountName, input.MachineID)
+ respReader, err := client.executeRequest(http.MethodGet, path, nil)
if respReader != nil {
defer respReader.Close()
}
@@ -560,6 +562,48 @@ func (client *MachinesClient) RemoveNIC(input *RemoveNICInput) error {
return nil
}
+type StopMachineInput struct {
+ MachineID string
+}
+
+func (client *MachinesClient) StopMachine(input *StopMachineInput) error {
+ path := fmt.Sprintf("/%s/machines/%s", client.accountName, input.MachineID)
+
+ params := &url.Values{}
+ params.Set("action", "stop")
+
+ respReader, err := client.executeRequestURIParams(http.MethodPost, path, nil, params)
+ if respReader != nil {
+ defer respReader.Close()
+ }
+ if err != nil {
+ return errwrap.Wrapf("Error executing StopMachine request: {{err}}", err)
+ }
+
+ return nil
+}
+
+type StartMachineInput struct {
+ MachineID string
+}
+
+func (client *MachinesClient) StartMachine(input *StartMachineInput) error {
+ path := fmt.Sprintf("/%s/machines/%s", client.accountName, input.MachineID)
+
+ params := &url.Values{}
+ params.Set("action", "start")
+
+ respReader, err := client.executeRequestURIParams(http.MethodPost, path, nil, params)
+ if respReader != nil {
+ defer respReader.Close()
+ }
+ if err != nil {
+ return errwrap.Wrapf("Error executing StartMachine request: {{err}}", err)
+ }
+
+ return nil
+}
+
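A short sketch of exercising the two new lifecycle actions, assuming a configured *triton.Client (as in the sketch above) and its Machines() sub-client accessor; the machine ID is a caller-supplied value:

// cycleMachine stops a machine and starts it again using the new actions.
func cycleMachine(client *triton.Client, machineID string) error {
    machines := client.Machines()
    if err := machines.StopMachine(&triton.StopMachineInput{MachineID: machineID}); err != nil {
        return err
    }
    // CloudAPI applies these actions asynchronously; a real caller would poll
    // GetMachine for the stopped state before issuing the start.
    return machines.StartMachine(&triton.StartMachineInput{MachineID: machineID})
}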
var reservedMachineCNSTags = map[string]struct{}{
machineCNSTagDisable: {},
machineCNSTagReversePTR: {},
diff --git a/vendor/github.com/joyent/triton-go/networks.go b/vendor/github.com/joyent/triton-go/networks.go
index cb1ec1700..440e3bc87 100644
--- a/vendor/github.com/joyent/triton-go/networks.go
+++ b/vendor/github.com/joyent/triton-go/networks.go
@@ -36,7 +36,8 @@ type Network struct {
type ListNetworksInput struct{}
func (client *NetworksClient) ListNetworks(*ListNetworksInput) ([]*Network, error) {
- respReader, err := client.executeRequest(http.MethodGet, "/my/networks", nil)
+ path := fmt.Sprintf("/%s/networks", client.accountName)
+ respReader, err := client.executeRequest(http.MethodGet, path, nil)
if respReader != nil {
defer respReader.Close()
}
diff --git a/vendor/github.com/profitbricks/profitbricks-sdk-go/nic.go b/vendor/github.com/profitbricks/profitbricks-sdk-go/nic.go
index 1d082a64d..86571ad01 100644
--- a/vendor/github.com/profitbricks/profitbricks-sdk-go/nic.go
+++ b/vendor/github.com/profitbricks/profitbricks-sdk-go/nic.go
@@ -22,7 +22,7 @@ type NicProperties struct {
Name string `json:"name,omitempty"`
Mac string `json:"mac,omitempty"`
Ips []string `json:"ips,omitempty"`
- Dhcp bool `json:"dhcp,omitempty"`
+ Dhcp bool `json:"dhcp"`
Lan int `json:"lan,omitempty"`
FirewallActive bool `json:"firewallActive,omitempty"`
Nat bool `json:"nat,omitempty"`
diff --git a/vendor/github.com/profitbricks/profitbricks-sdk-go/request.go b/vendor/github.com/profitbricks/profitbricks-sdk-go/request.go
index 023ae1a4a..98cbbffd9 100644
--- a/vendor/github.com/profitbricks/profitbricks-sdk-go/request.go
+++ b/vendor/github.com/profitbricks/profitbricks-sdk-go/request.go
@@ -3,6 +3,7 @@ package profitbricks
import (
"encoding/json"
"net/http"
+ "time"
)
type RequestStatus struct {
@@ -26,6 +27,55 @@ type RequestTarget struct {
Status string `json:"status,omitempty"`
}
+type Requests struct {
+ Id string `json:"id,omitempty"`
+ Type_ string `json:"type,omitempty"`
+ Href string `json:"href,omitempty"`
+ Items []Request `json:"items,omitempty"`
+ Response string `json:"Response,omitempty"`
+ Headers *http.Header `json:"headers,omitempty"`
+ StatusCode int `json:"statusCode,omitempty"`
+}
+
+type Request struct {
+ ID string `json:"id"`
+ Type string `json:"type"`
+ Href string `json:"href"`
+ Metadata struct {
+ CreatedDate time.Time `json:"createdDate"`
+ CreatedBy string `json:"createdBy"`
+ Etag string `json:"etag"`
+ RequestStatus struct {
+ ID string `json:"id"`
+ Type string `json:"type"`
+ Href string `json:"href"`
+ } `json:"requestStatus"`
+ } `json:"metadata"`
+ Properties struct {
+ Method string `json:"method"`
+ Headers interface{} `json:"headers"`
+ Body interface{} `json:"body"`
+ URL string `json:"url"`
+ } `json:"properties"`
+ Response string `json:"Response,omitempty"`
+ Headers *http.Header `json:"headers,omitempty"`
+ StatusCode int `json:"statusCode,omitempty"`
+}
+
+func ListRequests() Requests {
+ url := mk_url("/requests") + `?depth=` + Depth
+ req, _ := http.NewRequest("GET", url, nil)
+ req.Header.Add("Content-Type", FullHeader)
+ return toRequests(do(req))
+}
+
+func GetRequest(req_id string) Request {
+ url := mk_url("/requests/" + req_id) + `?depth=` + Depth
+ req, _ := http.NewRequest("GET", url, nil)
+ req.Header.Add("Content-Type", FullHeader)
+ return toRequest(do(req))
+}
+
func GetRequestStatus(path string) RequestStatus {
url := mk_url(path) + `?depth=` + Depth
req, _ := http.NewRequest("GET", url, nil)
@@ -41,3 +91,22 @@ func toRequestStatus(resp Resp) RequestStatus {
server.StatusCode = resp.StatusCode
return server
}
+
+func toRequests(resp Resp) Requests {
+ var server Requests
+ json.Unmarshal(resp.Body, &server)
+ server.Response = string(resp.Body)
+ server.Headers = &resp.Headers
+ server.StatusCode = resp.StatusCode
+ return server
+}
+
+func toRequest(resp Resp) Request {
+ var server Request
+ json.Unmarshal(resp.Body, &server)
+ server.Response = string(resp.Body)
+ server.Headers = &resp.Headers
+ server.StatusCode = resp.StatusCode
+ return server
+}
+
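A hedged sketch of walking the new request helpers; SetAuth is the SDK's existing package-level configuration call, and the credentials here are placeholders:

package main

import (
    "fmt"

    "github.com/profitbricks/profitbricks-sdk-go"
)

func main() {
    profitbricks.SetAuth("username", "password") // placeholder credentials

    // ListRequests returns the collection; GetRequest fetches a single entry
    // with its metadata, properties, and request status.
    reqs := profitbricks.ListRequests()
    for _, r := range reqs.Items {
        detail := profitbricks.GetRequest(r.ID)
        fmt.Printf("%s %s -> %s\n",
            detail.Properties.Method, detail.Properties.URL,
            detail.Metadata.RequestStatus.Href)
    }
}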
diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json
index 391300e3c..1a90deafc 100644
--- a/vendor/google.golang.org/api/compute/v1/compute-api.json
+++ b/vendor/google.golang.org/api/compute/v1/compute-api.json
@@ -1,11 +1,11 @@
{
"kind": "discovery#restDescription",
- "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/BIPueDp4_YHmOXWnzCh7vT7JOHQ\"",
+ "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/Jd_rZl9yiuNZcWvcgDDlnKBhwlY\"",
"discoveryVersion": "v1",
"id": "compute:v1",
"name": "compute",
"version": "v1",
- "revision": "20161123",
+ "revision": "20170329",
"title": "Compute Engine API",
"description": "Creates and runs virtual machines on Google Cloud Platform.",
"ownerDomain": "google.com",
@@ -103,7 +103,7 @@
},
"name": {
"type": "string",
- "description": "Name of this access configuration."
+ "description": "The name of this access configuration. The default and recommended name is External NAT but you can use any arbitrary string you would like. For example, My external IP or Network Access."
},
"natIP": {
"type": "string",
@@ -282,6 +282,7 @@
"NOT_CRITICAL_ERROR",
"NO_RESULTS_ON_PAGE",
"REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
"RESOURCE_NOT_DELETED",
"SINGLE_INSTANCE_PROPERTY_TEMPLATE",
"UNREACHABLE"
@@ -302,6 +303,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -410,12 +412,7 @@
"enumDescriptions": [
"",
""
- ],
- "annotations": {
- "required": [
- "compute.instances.insert"
- ]
- }
+ ]
}
}
},
@@ -596,6 +593,7 @@
"NOT_CRITICAL_ERROR",
"NO_RESULTS_ON_PAGE",
"REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
"RESOURCE_NOT_DELETED",
"SINGLE_INSTANCE_PROPERTY_TEMPLATE",
"UNREACHABLE"
@@ -616,6 +614,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -794,6 +793,79 @@
}
}
},
+ "BackendBucket": {
+ "id": "BackendBucket",
+ "type": "object",
+ "description": "A BackendBucket resource. This resource defines a Cloud Storage bucket.",
+ "properties": {
+ "bucketName": {
+ "type": "string",
+ "description": "Cloud Storage bucket name."
+ },
+ "creationTimestamp": {
+ "type": "string",
+ "description": "[Output Only] Creation timestamp in RFC3339 text format."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description of the resource; provided by the client when the resource is created."
+ },
+ "enableCdn": {
+ "type": "boolean",
+ "description": "If true, enable Cloud CDN for this BackendBucket."
+ },
+ "id": {
+ "type": "string",
+ "description": "[Output Only] Unique identifier for the resource; defined by the server.",
+ "format": "uint64"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#backendBucket"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "[Output Only] Server-defined URL for the resource."
+ }
+ }
+ },
+ "BackendBucketList": {
+ "id": "BackendBucketList",
+ "type": "object",
+ "description": "Contains a list of BackendBucket resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "[Output Only] Unique identifier for the resource; defined by the server."
+ },
+ "items": {
+ "type": "array",
+ "description": "A list of BackendBucket resources.",
+ "items": {
+ "$ref": "BackendBucket"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#backendBucketList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "[Output Only] A token used to continue a truncated list request."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "[Output Only] Server-defined URL for this resource."
+ }
+ }
+ },
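If the Go bindings are regenerated from this discovery document, inserting one of the new BackendBucket resources would look roughly like this sketch; the compute.Service value comes from the usual OAuth2 setup, and the names are placeholders:

func createBackendBucket(svc *compute.Service, project string) (*compute.Operation, error) {
    bucket := &compute.BackendBucket{
        Name:       "example-backend-bucket", // placeholder resource name
        BucketName: "example-gcs-bucket",     // placeholder Cloud Storage bucket
        EnableCdn:  true,
    }
    return svc.BackendBuckets.Insert(project, bucket).Do()
}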
"BackendService": {
"id": "BackendService",
"type": "object",
@@ -811,6 +883,10 @@
"$ref": "Backend"
}
},
+ "cdnPolicy": {
+ "$ref": "BackendServiceCdnPolicy",
+ "description": "Cloud CDN configuration for this BackendService."
+ },
"connectionDraining": {
"$ref": "ConnectionDraining"
},
@@ -877,7 +953,7 @@
},
"protocol": {
"type": "string",
- "description": "The protocol this BackendService uses to communicate with backends.\n\nPossible values are HTTP, HTTPS, HTTP2, TCP and SSL. The default is HTTP.\n\nFor internal load balancing, the possible values are TCP and UDP, and the default is TCP.",
+ "description": "The protocol this BackendService uses to communicate with backends.\n\nPossible values are HTTP, HTTPS, TCP, and SSL. The default is HTTP.\n\nFor internal load balancing, the possible values are TCP and UDP, and the default is TCP.",
"enum": [
"HTTP",
"HTTPS",
@@ -958,6 +1034,17 @@
}
}
},
+ "BackendServiceCdnPolicy": {
+ "id": "BackendServiceCdnPolicy",
+ "type": "object",
+ "description": "Message containing Cloud CDN configuration for a backend service.",
+ "properties": {
+ "cacheKeyPolicy": {
+ "$ref": "CacheKeyPolicy",
+ "description": "The CacheKeyPolicy for this CdnPolicy."
+ }
+ }
+ },
"BackendServiceGroupHealth": {
"id": "BackendServiceGroupHealth",
"type": "object",
@@ -998,7 +1085,7 @@
},
"nextPageToken": {
"type": "string",
- "description": "[Output Only] A token used to continue a truncated list request."
+ "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results."
},
"selfLink": {
"type": "string",
@@ -1038,6 +1125,7 @@
"NOT_CRITICAL_ERROR",
"NO_RESULTS_ON_PAGE",
"REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
"RESOURCE_NOT_DELETED",
"SINGLE_INSTANCE_PROPERTY_TEMPLATE",
"UNREACHABLE"
@@ -1058,6 +1146,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -1099,6 +1188,39 @@
}
}
},
+ "CacheKeyPolicy": {
+ "id": "CacheKeyPolicy",
+ "type": "object",
+ "description": "Message containing what to include in the cache key for a request for Cloud CDN.",
+ "properties": {
+ "includeHost": {
+ "type": "boolean",
+ "description": "If true, requests to different hosts will be cached separately."
+ },
+ "includeProtocol": {
+ "type": "boolean",
+ "description": "If true, http and https requests will be cached separately."
+ },
+ "includeQueryString": {
+ "type": "boolean",
+ "description": "If true, include query string parameters in the cache key according to query_string_whitelist and query_string_blacklist. If neither is set, the entire query string will be included. If false, the query string will be excluded from the cache key entirely."
+ },
+ "queryStringBlacklist": {
+ "type": "array",
+ "description": "Names of query string parameters to exclude in cache keys. All other parameters will be included. Either specify query_string_whitelist or query_string_blacklist, not both. '&' and '=' will be percent encoded and not treated as delimiters.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "queryStringWhitelist": {
+ "type": "array",
+ "description": "Names of query string parameters to include in cache keys. All other parameters will be excluded. Either specify query_string_whitelist or query_string_blacklist, not both. '&' and '=' will be percent encoded and not treated as delimiters.",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
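Against regenerated bindings, the cache-key options above would map onto struct fields roughly as in this sketch; the backend service name and whitelisted parameter are placeholders:

backendService := &compute.BackendService{
    Name: "example-backend-service", // placeholder
    CdnPolicy: &compute.BackendServiceCdnPolicy{
        CacheKeyPolicy: &compute.CacheKeyPolicy{
            IncludeHost:        true,
            IncludeProtocol:    true,
            IncludeQueryString: true,
            // Whitelist and blacklist are mutually exclusive per the schema
            // above; this caches only on the "version" query parameter.
            QueryStringWhitelist: []string{"version"},
        },
    },
}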
"ConnectionDraining": {
"id": "ConnectionDraining",
"type": "object",
@@ -1322,7 +1444,7 @@
},
"nextPageToken": {
"type": "string",
- "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results."
+ "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results. Acceptable values are 0 to 500, inclusive. (Default: 500)"
},
"selfLink": {
"type": "string",
@@ -1337,11 +1459,11 @@
"properties": {
"id": {
"type": "string",
- "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server."
+ "description": "[Output Only] Unique identifier for the resource; defined by the server."
},
"items": {
"type": "array",
- "description": "[Output Only] A list of persistent disks.",
+ "description": "A list of Disk resources.",
"items": {
"$ref": "Disk"
}
@@ -1353,7 +1475,7 @@
},
"nextPageToken": {
"type": "string",
- "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results."
+ "description": "This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results."
},
"selfLink": {
"type": "string",
@@ -1520,6 +1642,7 @@
"NOT_CRITICAL_ERROR",
"NO_RESULTS_ON_PAGE",
"REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
"RESOURCE_NOT_DELETED",
"SINGLE_INSTANCE_PROPERTY_TEMPLATE",
"UNREACHABLE"
@@ -1540,6 +1663,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -1611,6 +1735,7 @@
"NOT_CRITICAL_ERROR",
"NO_RESULTS_ON_PAGE",
"REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
"RESOURCE_NOT_DELETED",
"SINGLE_INSTANCE_PROPERTY_TEMPLATE",
"UNREACHABLE"
@@ -1631,6 +1756,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -1723,7 +1849,7 @@
},
"sourceRanges": {
"type": "array",
- "description": "If source ranges are specified, the firewall will apply only to traffic that has source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply.",
+ "description": "If source ranges are specified, the firewall will apply only to traffic that has source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply. Only IPv4 is supported.",
"items": {
"type": "string"
}
@@ -1782,14 +1908,15 @@
"properties": {
"IPAddress": {
"type": "string",
- "description": "The IP address that this forwarding rule is serving on behalf of.\n\nFor global forwarding rules, the address must be a global IP; for regional forwarding rules, the address must live in the same region as the forwarding rule. By default, this field is empty and an ephemeral IP from the same scope (global or regional) will be assigned.\n\nWhen the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address belonging to the network/subnetwork configured for the forwarding rule. A reserved address cannot be used. If the field is empty, the IP address will be automatically allocated from the internal IP range of the subnetwork or network configured for this forwarding rule."
+ "description": "The IP address that this forwarding rule is serving on behalf of.\n\nFor global forwarding rules, the address must be a global IP. For regional forwarding rules, the address must live in the same region as the forwarding rule. By default, this field is empty and an ephemeral IP from the same scope (global or regional) will be assigned.\n\nWhen the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address belonging to the network/subnetwork configured for the forwarding rule. A reserved address cannot be used. If the field is empty, the IP address will be automatically allocated from the internal IP range of the subnetwork or network configured for this forwarding rule. Only IPv4 is supported."
},
"IPProtocol": {
"type": "string",
- "description": "The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, AH, SCTP or ICMP.\n\nWhen the load balancing scheme is INTERNAL\u003c/code, only TCP and UDP are valid.",
+ "description": "The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, AH, SCTP or ICMP.\n\nWhen the load balancing scheme is INTERNAL, only TCP and UDP are valid.",
"enum": [
"AH",
"ESP",
+ "ICMP",
"SCTP",
"TCP",
"UDP"
@@ -1799,6 +1926,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -1826,7 +1954,7 @@
},
"loadBalancingScheme": {
"type": "string",
- "description": "This signifies what the ForwardingRule will be used for and can only take the following values: INTERNAL EXTERNAL The value of INTERNAL means that this will be used for Internal Network Load Balancing (TCP, UDP). The value of EXTERNAL means that this will be used for External Load Balancing (HTTP(S) LB, External TCP/UDP LB, SSL Proxy)",
+ "description": "This signifies what the ForwardingRule will be used for and can only take the following values: INTERNAL, EXTERNAL The value of INTERNAL means that this will be used for Internal Network Load Balancing (TCP, UDP). The value of EXTERNAL means that this will be used for External Load Balancing (HTTP(S) LB, External TCP/UDP LB, SSL Proxy)",
"enum": [
"EXTERNAL",
"INTERNAL",
@@ -1853,7 +1981,7 @@
},
"ports": {
"type": "array",
- "description": "This field is not used for external load balancing.\n\nWhen the load balancing scheme is INTERNAL, a single port or a comma separated list of ports can be configured. Only packets addressed to these ports will be forwarded to the backends configured with this forwarding rule. If the port list is not provided then all ports are allowed to pass through.\n\nYou may specify a maximum of up to 5 ports.",
+ "description": "This field is not used for external load balancing.\n\nWhen the load balancing scheme is INTERNAL, a single port or a comma separated list of ports can be configured. Only packets addressed to these ports will be forwarded to the backends configured with this forwarding rule.\n\nYou may specify a maximum of up to 5 ports.",
"items": {
"type": "string"
}
@@ -1872,7 +2000,7 @@
},
"target": {
"type": "string",
- "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global TargetHttpProxy or TargetHttpsProxy resource. The forwarded traffic must be of a type appropriate to the target object. For example, TargetHttpProxy requires HTTP traffic, and TargetHttpsProxy requires HTTPS traffic.\n\nThis field is not used for internal load balancing."
+ "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object.\n\nThis field is not used for internal load balancing."
}
}
},
@@ -1970,6 +2098,7 @@
"NOT_CRITICAL_ERROR",
"NO_RESULTS_ON_PAGE",
"REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
"RESOURCE_NOT_DELETED",
"SINGLE_INSTANCE_PROPERTY_TEMPLATE",
"UNREACHABLE"
@@ -1990,6 +2119,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -2049,7 +2179,7 @@
},
"port": {
"type": "integer",
- "description": "The TCP port number for the health check request. The default value is 80.",
+ "description": "The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.",
"format": "int32"
},
"portName": {
@@ -2084,7 +2214,7 @@
},
"port": {
"type": "integer",
- "description": "The TCP port number for the health check request. The default value is 443.",
+ "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.",
"format": "int32"
},
"portName": {
@@ -2709,7 +2839,13 @@
},
"name": {
"type": "string",
- "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash."
+ "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "annotations": {
+ "required": [
+ "compute.instances.insert"
+ ]
+ }
},
"networkInterfaces": {
"type": "array",
@@ -2720,7 +2856,7 @@
},
"scheduling": {
"$ref": "Scheduling",
- "description": "Scheduling options for this instance."
+ "description": "Sets the scheduling options for this instance."
},
"selfLink": {
"type": "string",
@@ -2728,7 +2864,7 @@
},
"serviceAccounts": {
"type": "array",
- "description": "A list of service accounts, with their specified scopes, authorized for this instance. Service accounts generate access tokens that can be accessed through the metadata server and used to authenticate applications on the instance. See Service Accounts for more information.",
+ "description": "A list of service accounts, with their specified scopes, authorized for this instance. Only one service account per VM instance is supported.\n\nService accounts generate access tokens that can be accessed through the metadata server and used to authenticate applications on the instance. See Service Accounts for more information.",
"items": {
"$ref": "ServiceAccount"
}
@@ -3225,6 +3361,7 @@
"NOT_CRITICAL_ERROR",
"NO_RESULTS_ON_PAGE",
"REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
"RESOURCE_NOT_DELETED",
"SINGLE_INSTANCE_PROPERTY_TEMPLATE",
"UNREACHABLE"
@@ -3245,6 +3382,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -3407,6 +3545,7 @@
"NOT_CRITICAL_ERROR",
"NO_RESULTS_ON_PAGE",
"REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
"RESOURCE_NOT_DELETED",
"SINGLE_INSTANCE_PROPERTY_TEMPLATE",
"UNREACHABLE"
@@ -3427,6 +3566,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -3732,6 +3872,7 @@
"NOT_CRITICAL_ERROR",
"NO_RESULTS_ON_PAGE",
"REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
"RESOURCE_NOT_DELETED",
"SINGLE_INSTANCE_PROPERTY_TEMPLATE",
"UNREACHABLE"
@@ -3752,6 +3893,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -3790,6 +3932,23 @@
}
}
},
+ "InstancesSetServiceAccountRequest": {
+ "id": "InstancesSetServiceAccountRequest",
+ "type": "object",
+ "properties": {
+ "email": {
+ "type": "string",
+ "description": "Email address of the service account."
+ },
+ "scopes": {
+ "type": "array",
+ "description": "The list of scopes to be made available for this service account.",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
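Assuming the regenerated bindings expose a matching Instances.SetServiceAccount call for this request type, usage would look roughly like this sketch; the service-account email and scope are placeholders:

func swapServiceAccount(svc *compute.Service, project, zone, instance string) (*compute.Operation, error) {
    req := &compute.InstancesSetServiceAccountRequest{
        Email:  "example-sa@my-project.iam.gserviceaccount.com", // placeholder
        Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
    }
    return svc.Instances.SetServiceAccount(project, zone, instance, req).Do()
}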
"InstancesStartWithEncryptionKeyRequest": {
"id": "InstancesStartWithEncryptionKeyRequest",
"type": "object",
@@ -4012,6 +4171,7 @@
"NOT_CRITICAL_ERROR",
"NO_RESULTS_ON_PAGE",
"REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
"RESOURCE_NOT_DELETED",
"SINGLE_INSTANCE_PROPERTY_TEMPLATE",
"UNREACHABLE"
@@ -4032,6 +4192,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -4295,18 +4456,18 @@
"$ref": "AccessConfig"
}
},
+ "kind": {
+ "type": "string",
+ "description": "[Output Only] Type of the resource. Always compute#networkInterface for network interfaces.",
+ "default": "compute#networkInterface"
+ },
"name": {
"type": "string",
"description": "[Output Only] The name of the network interface, generated by the server. For network devices, these are eth0, eth1, etc."
},
"network": {
"type": "string",
- "description": "URL of the network resource for this instance. This is required for creating an instance but optional when creating a firewall rule. If not specified when creating a firewall rule, the default network is used:\n\nglobal/networks/default \n\nIf you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/global/networks/network \n- projects/project/global/networks/network \n- global/networks/default",
- "annotations": {
- "required": [
- "compute.instances.insert"
- ]
- }
+ "description": "URL of the network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used; if the network is not specified but the subnetwork is specified, the network is inferred.\n\nThis field is optional when creating a firewall rule. If not specified when creating a firewall rule, the default network global/networks/default is used.\n\nIf you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/global/networks/network \n- projects/project/global/networks/network \n- global/networks/default"
},
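Review note: the rewritten network description above allows several URL forms and infers the network from the subnetwork when the network is omitted. A minimal sketch of the accepted shapes using the generated Go client (google.golang.org/api/compute/v1); every resource name below is hypothetical:

```go
package sketch

import compute "google.golang.org/api/compute/v1"

// Accepted shapes for NetworkInterface.Network per the description
// above. Names like my-project and my-net are placeholders.
func interfaceForms() []*compute.NetworkInterface {
	return []*compute.NetworkInterface{
		// Full URL.
		{Network: "https://www.googleapis.com/compute/v1/projects/my-project/global/networks/my-net"},
		// Partial URL.
		{Network: "projects/my-project/global/networks/my-net"},
		// Shorthand for the default network.
		{Network: "global/networks/default"},
		// Network omitted entirely: inferred from the subnetwork.
		{Subnetwork: "regions/us-central1/subnetworks/my-subnet"},
	}
}
```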
"networkIP": {
"type": "string",
@@ -4360,7 +4521,7 @@
},
"creationTimestamp": {
"type": "string",
- "description": "[Output Only] Creation timestamp in RFC3339 text format."
+ "description": "[Deprecated] This field is deprecated."
},
"description": {
"type": "string",
@@ -4499,6 +4660,7 @@
"NOT_CRITICAL_ERROR",
"NO_RESULTS_ON_PAGE",
"REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
"RESOURCE_NOT_DELETED",
"SINGLE_INSTANCE_PROPERTY_TEMPLATE",
"UNREACHABLE"
@@ -4519,6 +4681,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -4646,6 +4809,7 @@
"NOT_CRITICAL_ERROR",
"NO_RESULTS_ON_PAGE",
"REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
"RESOURCE_NOT_DELETED",
"SINGLE_INSTANCE_PROPERTY_TEMPLATE",
"UNREACHABLE"
@@ -4666,6 +4830,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -4812,8 +4977,10 @@
"description": "[Output Only] Name of the quota metric.",
"enum": [
"AUTOSCALERS",
+ "BACKEND_BUCKETS",
"BACKEND_SERVICES",
"CPUS",
+ "CPUS_ALL_REGIONS",
"DISKS_TOTAL_GB",
"FIREWALLS",
"FORWARDING_RULES",
@@ -4842,7 +5009,6 @@
"TARGET_POOLS",
"TARGET_SSL_PROXIES",
"TARGET_VPN_GATEWAYS",
- "TOTAL_CPUS",
"URL_MAPS",
"VPN_TUNNELS"
],
@@ -4880,6 +5046,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -5052,7 +5219,7 @@
"properties": {
"instances": {
"type": "array",
- "description": "The names of one or more instances to abandon.",
+ "description": "The URLs of one or more instances to abandon. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].",
"items": {
"type": "string"
}
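Review note: the abandon/delete request descriptions now take instance URLs rather than bare names. A hedged sketch of an abandonInstances call, assuming this hunk belongs to InstanceGroupManagersAbandonInstancesRequest and that an authenticated *compute.Service is available; all names are hypothetical:

```go
package sketch

import compute "google.golang.org/api/compute/v1"

// Abandon an instance by partial URL, as the updated description permits.
func abandonByPartialURL(svc *compute.Service) (*compute.Operation, error) {
	req := &compute.InstanceGroupManagersAbandonInstancesRequest{
		// Partial URL form; a full URL would also be accepted.
		Instances: []string{"zones/us-central1-f/instances/my-instance-0"},
	}
	return svc.InstanceGroupManagers.AbandonInstances("my-project", "us-central1-f", "my-igm", req).Do()
}
```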
@@ -5065,7 +5232,7 @@
"properties": {
"instances": {
"type": "array",
- "description": "The names of one or more instances to delete.",
+ "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].",
"items": {
"type": "string"
}
@@ -5241,7 +5408,7 @@
"Route": {
"id": "Route",
"type": "object",
- "description": "Represents a Route resource. A route specifies how certain packets should be handled by the network. Routes are associated with instances by tags and the set of routes for a particular instance is called its routing table.\n\nFor each packet leaving a instance, the system searches that instance's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the nextHop field of the winning route - either to another instance destination, a instance gateway or a Google Compute Engine-operated gateway.\n\nPackets that do not match any route in the sending instance's routing table are dropped.",
+ "description": "Represents a Route resource. A route specifies how certain packets should be handled by the network. Routes are associated with instances by tags and the set of routes for a particular instance is called its routing table.\n\nFor each packet leaving a instance, the system searches that instance's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the nextHop field of the winning route - either to another instance destination, an instance gateway, or a Google Compute Engine-operated gateway.\n\nPackets that do not match any route in the sending instance's routing table are dropped.",
"properties": {
"creationTimestamp": {
"type": "string",
@@ -5253,7 +5420,7 @@
},
"destRange": {
"type": "string",
- "description": "The destination range of outgoing packets that this route applies to.",
+ "description": "The destination range of outgoing packets that this route applies to. Only IPv4 is supported.",
"annotations": {
"required": [
"compute.routes.insert"
@@ -5299,7 +5466,7 @@
},
"nextHopIp": {
"type": "string",
- "description": "The network IP address of an instance that should handle matching packets."
+ "description": "The network IP address of an instance that should handle matching packets. Only IPv4 is supported."
},
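Review note: the Route description above spells out the matching algorithm (most specific destination range wins, then lowest priority value), and destRange/nextHopIp are now documented as IPv4-only. A minimal sketch of inserting a route through an instance's network IP; all names and addresses are hypothetical:

```go
package sketch

import compute "google.golang.org/api/compute/v1"

// Route 10.20.0.0/16 through a forwarding instance's network IP.
// A more specific destRange, or a lower Priority on a tie, wins.
func insertRoute(svc *compute.Service) (*compute.Operation, error) {
	route := &compute.Route{
		Name:      "route-via-gateway-vm",
		Network:   "global/networks/default",
		DestRange: "10.20.0.0/16", // IPv4 only, per the description
		NextHopIp: "10.128.0.5",   // network IP of the forwarding instance
		Priority:  1000,
	}
	return svc.Routes.Insert("my-project", route).Do()
}
```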
"nextHopNetwork": {
"type": "string",
@@ -5358,6 +5525,7 @@
"NOT_CRITICAL_ERROR",
"NO_RESULTS_ON_PAGE",
"REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
"RESOURCE_NOT_DELETED",
"SINGLE_INSTANCE_PROPERTY_TEMPLATE",
"UNREACHABLE"
@@ -5378,6 +5546,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -5566,7 +5735,7 @@
},
"ipAddress": {
"type": "string",
- "description": "IP address of the interface inside Google Cloud Platform."
+ "description": "IP address of the interface inside Google Cloud Platform. Only IPv4 is supported."
},
"name": {
"type": "string",
@@ -5580,7 +5749,7 @@
},
"peerIpAddress": {
"type": "string",
- "description": "IP address of the BGP interface outside Google cloud."
+ "description": "IP address of the BGP interface outside Google cloud. Only IPv4 is supported."
}
}
},
@@ -5594,7 +5763,7 @@
},
"linkedVpnTunnel": {
"type": "string",
- "description": "URI of linked VPN tunnel. It must be in the same region as the router. Each interface can have at most one linked resource."
+ "description": "URI of the linked VPN tunnel. It must be in the same region as the router. Each interface can have at most one linked resource and it could either be a VPN Tunnel or an interconnect attachment."
},
"name": {
"type": "string",
@@ -5773,6 +5942,7 @@
"NOT_CRITICAL_ERROR",
"NO_RESULTS_ON_PAGE",
"REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
"RESOURCE_NOT_DELETED",
"SINGLE_INSTANCE_PROPERTY_TEMPLATE",
"UNREACHABLE"
@@ -5793,6 +5963,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -5827,7 +5998,7 @@
"properties": {
"port": {
"type": "integer",
- "description": "The TCP port number for the health check request. The default value is 443.",
+ "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.",
"format": "int32"
},
"portName": {
@@ -5863,7 +6034,7 @@
"properties": {
"automaticRestart": {
"type": "boolean",
- "description": "Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted."
+ "description": "Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted.\n\nBy default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine."
},
"onHostMaintenance": {
"type": "string",
@@ -5879,7 +6050,7 @@
},
"preemptible": {
"type": "boolean",
- "description": "Whether the instance is preemptible."
+ "description": "Defines whether the instance is preemptible. This can only be set during instance creation, it cannot be set or changed after the instance has been created."
}
}
},
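Review note: per the clarified descriptions, automaticRestart defaults to true and preemptible can only be set at creation. A sketch of a Scheduling block for a preemptible instance (which cannot be auto-restarted, so host maintenance must terminate it); field availability is assumed from the schema above:

```go
package sketch

import compute "google.golang.org/api/compute/v1"

// Scheduling for a preemptible instance. Preemptible cannot be changed
// after creation, and preemptible instances cannot be auto-restarted.
func preemptibleScheduling() *compute.Scheduling {
	return &compute.Scheduling{
		Preemptible:       true,
		OnHostMaintenance: "TERMINATE",
	}
}
```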
@@ -5908,7 +6079,7 @@
},
"start": {
"type": "string",
- "description": "[Output Only] The starting byte position of the output that was returned. This should match the start parameter sent with the request. If the serial console output exceeds the size of the buffer, older output will be overwritten by newer content and the start values will be mismatched.",
+ "description": "The starting byte position of the output that was returned. This should match the start parameter sent with the request. If the serial console output exceeds the size of the buffer, older output will be overwritten by newer content and the start values will be mismatched.",
"format": "int64"
}
}
@@ -6156,7 +6327,7 @@
},
"ipCidrRange": {
"type": "string",
- "description": "The range of internal addresses that are owned by this subnetwork. Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network."
+ "description": "The range of internal addresses that are owned by this subnetwork. Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported."
},
"kind": {
"type": "string",
@@ -6172,6 +6343,10 @@
"type": "string",
"description": "The URL of the network to which this subnetwork belongs, provided by the client when initially creating the subnetwork. Only networks that are in the distributed mode can have subnetworks."
},
+ "privateIpGoogleAccess": {
+ "type": "boolean",
+ "description": "Whether the VMs in this subnet can access Google services without assigned external IP addresses."
+ },
"region": {
"type": "string",
"description": "URL of the region where the Subnetwork resides."
@@ -6286,6 +6461,7 @@
"NOT_CRITICAL_ERROR",
"NO_RESULTS_ON_PAGE",
"REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
"RESOURCE_NOT_DELETED",
"SINGLE_INSTANCE_PROPERTY_TEMPLATE",
"UNREACHABLE"
@@ -6306,6 +6482,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -6334,13 +6511,22 @@
}
}
},
+ "SubnetworksSetPrivateIpGoogleAccessRequest": {
+ "id": "SubnetworksSetPrivateIpGoogleAccessRequest",
+ "type": "object",
+ "properties": {
+ "privateIpGoogleAccess": {
+ "type": "boolean"
+ }
+ }
+ },
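Review note: the new privateIpGoogleAccess field and this request type imply a corresponding setPrivateIpGoogleAccess method on subnetworks. A hedged sketch, assuming the generated client exposes Subnetworks.SetPrivateIpGoogleAccess with this request type; names are hypothetical:

```go
package sketch

import compute "google.golang.org/api/compute/v1"

// Let VMs without external IPs in a subnetwork reach Google services.
func enablePrivateGoogleAccess(svc *compute.Service) (*compute.Operation, error) {
	req := &compute.SubnetworksSetPrivateIpGoogleAccessRequest{
		PrivateIpGoogleAccess: true,
	}
	return svc.Subnetworks.SetPrivateIpGoogleAccess("my-project", "us-central1", "my-subnet", req).Do()
}
```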
"TCPHealthCheck": {
"id": "TCPHealthCheck",
"type": "object",
"properties": {
"port": {
"type": "integer",
- "description": "The TCP port number for the health check request. The default value is 80.",
+ "description": "The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.",
"format": "int32"
},
"portName": {
@@ -6692,6 +6878,7 @@
"NOT_CRITICAL_ERROR",
"NO_RESULTS_ON_PAGE",
"REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
"RESOURCE_NOT_DELETED",
"SINGLE_INSTANCE_PROPERTY_TEMPLATE",
"UNREACHABLE"
@@ -6712,6 +6899,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -6743,7 +6931,7 @@
"TargetPool": {
"id": "TargetPool",
"type": "object",
- "description": "A TargetPool resource. This resource defines a pool of instances, associated HttpHealthCheck resources, and the fallback target pool.",
+ "description": "A TargetPool resource. This resource defines a pool of instances, an associated HttpHealthCheck resource, and the fallback target pool.",
"properties": {
"backupPool": {
"type": "string",
@@ -6764,7 +6952,7 @@
},
"healthChecks": {
"type": "array",
- "description": "A list of URLs to the HttpHealthCheck resource. A member instance in this pool is considered healthy if and only if all specified health checks pass. An empty list means all member instances will be considered healthy at all times.",
+ "description": "The URL of the HttpHealthCheck resource. A member instance in this pool is considered healthy if and only if the health checks pass. An empty list means all member instances will be considered healthy at all times. Only HttpHealthChecks are supported. Only one health check may be specified.",
"items": {
"type": "string"
}
@@ -6904,7 +7092,7 @@
"properties": {
"healthChecks": {
"type": "array",
- "description": "A list of HttpHealthCheck resources to add to the target pool.",
+ "description": "The HttpHealthCheck to add to the target pool.",
"items": {
"$ref": "HealthCheckReference"
}
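Review note: since the descriptions now state that a target pool supports only a single HttpHealthCheck, an addHealthCheck call carries exactly one reference. A minimal sketch, with hypothetical names:

```go
package sketch

import compute "google.golang.org/api/compute/v1"

// Attach the single supported HttpHealthCheck to a target pool.
func addHealthCheck(svc *compute.Service) (*compute.Operation, error) {
	req := &compute.TargetPoolsAddHealthCheckRequest{
		HealthChecks: []*compute.HealthCheckReference{
			{HealthCheck: "global/httpHealthChecks/my-check"},
		},
	}
	return svc.TargetPools.AddHealthCheck("my-project", "us-central1", "my-pool", req).Do()
}
```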
@@ -6982,6 +7170,7 @@
"NOT_CRITICAL_ERROR",
"NO_RESULTS_ON_PAGE",
"REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
"RESOURCE_NOT_DELETED",
"SINGLE_INSTANCE_PROPERTY_TEMPLATE",
"UNREACHABLE"
@@ -7002,6 +7191,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -7344,6 +7534,7 @@
"NOT_CRITICAL_ERROR",
"NO_RESULTS_ON_PAGE",
"REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
"RESOURCE_NOT_DELETED",
"SINGLE_INSTANCE_PROPERTY_TEMPLATE",
"UNREACHABLE"
@@ -7364,6 +7555,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -7631,7 +7823,7 @@
},
"localTrafficSelector": {
"type": "array",
- "description": "Local traffic selector to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint.",
+ "description": "Local traffic selector to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint. Only IPv4 is supported.",
"items": {
"type": "string"
}
@@ -7648,7 +7840,7 @@
},
"peerIp": {
"type": "string",
- "description": "IP address of the peer VPN gateway."
+ "description": "IP address of the peer VPN gateway. Only IPv4 is supported."
},
"region": {
"type": "string",
@@ -7656,7 +7848,7 @@
},
"remoteTrafficSelector": {
"type": "array",
- "description": "Remote traffic selectors to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint.",
+ "description": "Remote traffic selectors to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint. Only IPv4 is supported.",
"items": {
"type": "string"
}
@@ -7814,6 +8006,7 @@
"NOT_CRITICAL_ERROR",
"NO_RESULTS_ON_PAGE",
"REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
"RESOURCE_NOT_DELETED",
"SINGLE_INSTANCE_PROPERTY_TEMPLATE",
"UNREACHABLE"
@@ -7834,6 +8027,7 @@
"",
"",
"",
+ "",
""
]
},
@@ -7963,11 +8157,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
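Review note: this hunk (and the many like it below) drops the schema-level maximum on maxResults while the description keeps the documented 0-500 range. A sketch of the page-token loop the description implies, using Instances.List as a representative list call; names are hypothetical:

```go
package sketch

import compute "google.golang.org/api/compute/v1"

// Collect all results by following nextPageToken until it is empty.
func listAllInstances(svc *compute.Service) ([]*compute.Instance, error) {
	var all []*compute.Instance
	token := ""
	for {
		call := svc.Instances.List("my-project", "us-central1-f").MaxResults(100)
		if token != "" {
			call = call.PageToken(token)
		}
		resp, err := call.Do()
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Items...)
		if resp.NextPageToken == "" {
			return all, nil
		}
		token = resp.NextPageToken
	}
}
```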
"orderBy": {
@@ -8132,11 +8325,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -8194,11 +8386,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -8363,11 +8554,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -8416,8 +8606,7 @@
"parameters": {
"autoscaler": {
"type": "string",
- "description": "Name of the autoscaler to update.",
- "required": true,
+ "description": "Name of the autoscaler to patch.",
"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
"location": "query"
},
@@ -8438,8 +8627,7 @@
},
"parameterOrder": [
"project",
- "zone",
- "autoscaler"
+ "zone"
],
"request": {
"$ref": "Autoscaler"
@@ -8449,7 +8637,8 @@
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute"
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
]
},
"update": {
@@ -8496,6 +8685,227 @@
}
}
},
+ "backendBuckets": {
+ "methods": {
+ "delete": {
+ "id": "compute.backendBuckets.delete",
+ "path": "{project}/global/backendBuckets/{backendBucket}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified BackendBucket resource.",
+ "parameters": {
+ "backendBucket": {
+ "type": "string",
+ "description": "Name of the BackendBucket resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Project ID for this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "backendBucket"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.backendBuckets.get",
+ "path": "{project}/global/backendBuckets/{backendBucket}",
+ "httpMethod": "GET",
+ "description": "Returns the specified BackendBucket resource. Get a list of available backend buckets by making a list() request.",
+ "parameters": {
+ "backendBucket": {
+ "type": "string",
+ "description": "Name of the BackendBucket resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Project ID for this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "backendBucket"
+ ],
+ "response": {
+ "$ref": "BackendBucket"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "insert": {
+ "id": "compute.backendBuckets.insert",
+ "path": "{project}/global/backendBuckets",
+ "httpMethod": "POST",
+ "description": "Creates a BackendBucket resource in the specified project using the data included in the request.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Project ID for this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "request": {
+ "$ref": "BackendBucket"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "list": {
+ "id": "compute.backendBuckets.list",
+ "path": "{project}/global/backendBuckets",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of BackendBucket resources available to the specified project.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "location": "query"
+ },
+ "orderBy": {
+ "type": "string",
+ "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Project ID for this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "BackendBucketList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "patch": {
+ "id": "compute.backendBuckets.patch",
+ "path": "{project}/global/backendBuckets/{backendBucket}",
+ "httpMethod": "PATCH",
+ "description": "Updates the specified BackendBucket resource with the data included in the request. This method supports patch semantics.",
+ "parameters": {
+ "backendBucket": {
+ "type": "string",
+ "description": "Name of the BackendBucket resource to patch.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Project ID for this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "backendBucket"
+ ],
+ "request": {
+ "$ref": "BackendBucket"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "update": {
+ "id": "compute.backendBuckets.update",
+ "path": "{project}/global/backendBuckets/{backendBucket}",
+ "httpMethod": "PUT",
+ "description": "Updates the specified BackendBucket resource with the data included in the request.",
+ "parameters": {
+ "backendBucket": {
+ "type": "string",
+ "description": "Name of the BackendBucket resource to update.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Project ID for this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "backendBucket"
+ ],
+ "request": {
+ "$ref": "BackendBucket"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/compute"
+ ]
+ }
+ }
+ },
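Review note: the new backendBuckets resource mirrors the usual verb set (delete/get/insert/list/patch/update). A hedged sketch of insert followed by get, assuming the generated BackendBucket type carries Name and BucketName as in the schema added elsewhere in this diff; names are hypothetical:

```go
package sketch

import compute "google.golang.org/api/compute/v1"

// Create a backend bucket over an existing Cloud Storage bucket,
// then read it back. In practice, wait for the insert Operation to
// finish before the get.
func createBackendBucket(svc *compute.Service) (*compute.BackendBucket, error) {
	bb := &compute.BackendBucket{
		Name:       "static-assets",
		BucketName: "my-gcs-bucket",
	}
	if _, err := svc.BackendBuckets.Insert("my-project", bb).Do(); err != nil {
		return nil, err
	}
	return svc.BackendBuckets.Get("my-project", "static-assets").Do()
}
```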
"backendServices": {
"methods": {
"aggregatedList": {
@@ -8511,11 +8921,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -8692,11 +9101,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -8733,11 +9141,11 @@
"id": "compute.backendServices.patch",
"path": "{project}/global/backendServices/{backendService}",
"httpMethod": "PATCH",
- "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports patch semantics.",
+ "description": "Patches the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports patch semantics.",
"parameters": {
"backendService": {
"type": "string",
- "description": "Name of the BackendService resource to update.",
+ "description": "Name of the BackendService resource to patch.",
"required": true,
"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
"location": "path"
@@ -8762,7 +9170,8 @@
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute"
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
]
},
"update": {
@@ -8818,11 +9227,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -8910,11 +9318,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -8972,11 +9379,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -9022,6 +9428,10 @@
"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
"location": "path"
},
+ "guestFlush": {
+ "type": "boolean",
+ "location": "query"
+ },
"project": {
"type": "string",
"description": "Project ID for this request.",
@@ -9189,11 +9599,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -9390,11 +9799,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -9431,7 +9839,7 @@
"id": "compute.firewalls.patch",
"path": "{project}/global/firewalls/{firewall}",
"httpMethod": "PATCH",
- "description": "Updates the specified firewall rule with the data included in the request. This method supports patch semantics.",
+ "description": "Updates the specified firewall rule with the data included in the request. Using PUT method, can only update following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags. This method supports patch semantics.",
"parameters": {
"firewall": {
"type": "string",
@@ -9467,7 +9875,7 @@
"id": "compute.firewalls.update",
"path": "{project}/global/firewalls/{firewall}",
"httpMethod": "PUT",
- "description": "Updates the specified firewall rule with the data included in the request.",
+ "description": "Updates the specified firewall rule with the data included in the request. Using PUT method, can only update following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags.",
"parameters": {
"firewall": {
"type": "string",
@@ -9516,11 +9924,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -9685,11 +10092,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -9886,11 +10292,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -10035,11 +10440,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -10125,11 +10529,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -10239,11 +10642,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -10388,11 +10790,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -10433,7 +10834,7 @@
"parameters": {
"healthCheck": {
"type": "string",
- "description": "Name of the HealthCheck resource to update.",
+ "description": "Name of the HealthCheck resource to patch.",
"required": true,
"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
"location": "path"
@@ -10458,7 +10859,8 @@
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute"
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
]
},
"update": {
@@ -10609,11 +11011,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -10654,7 +11055,7 @@
"parameters": {
"httpHealthCheck": {
"type": "string",
- "description": "Name of the HttpHealthCheck resource to update.",
+ "description": "Name of the HttpHealthCheck resource to patch.",
"required": true,
"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
"location": "path"
@@ -10679,7 +11080,8 @@
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute"
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
]
},
"update": {
@@ -10830,11 +11232,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -10875,7 +11276,7 @@
"parameters": {
"httpsHealthCheck": {
"type": "string",
- "description": "Name of the HttpsHealthCheck resource to update.",
+ "description": "Name of the HttpsHealthCheck resource to patch.",
"required": true,
"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
"location": "path"
@@ -10900,7 +11301,8 @@
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute"
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
]
},
"update": {
@@ -11124,11 +11526,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -11169,7 +11570,7 @@
"id": "compute.instanceGroupManagers.abandonInstances",
"path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances",
"httpMethod": "POST",
- "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.",
+ "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nYou can specify a maximum of 1000 instances with this method per request.",
"parameters": {
"instanceGroupManager": {
"type": "string",
@@ -11220,11 +11621,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -11300,7 +11700,7 @@
"id": "compute.instanceGroupManagers.deleteInstances",
"path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances",
"httpMethod": "POST",
- "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.",
+ "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nYou can specify a maximum of 1000 instances with this method per request.",
"parameters": {
"instanceGroupManager": {
"type": "string",
@@ -11382,7 +11782,7 @@
"id": "compute.instanceGroupManagers.insert",
"path": "{project}/zones/{zone}/instanceGroupManagers",
"httpMethod": "POST",
- "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.",
+ "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group.",
"parameters": {
"project": {
"type": "string",
@@ -11426,11 +11826,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -11491,7 +11890,6 @@
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"order_by": {
@@ -11534,7 +11932,7 @@
"id": "compute.instanceGroupManagers.recreateInstances",
"path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances",
"httpMethod": "POST",
- "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.",
+ "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nYou can specify a maximum of 1000 instances with this method per request.",
"parameters": {
"instanceGroupManager": {
"type": "string",
@@ -11762,11 +12160,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -11926,11 +12323,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -11989,11 +12385,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -12233,11 +12628,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -12338,11 +12732,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -12636,7 +13029,7 @@
},
"start": {
"type": "string",
- "description": "For the initial request, leave this field unspecified. For subsequent calls, this field should be set to the next value that was returned in the previous call.",
+ "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. For subsequent calls, this field should be set to the next value returned in the previous call.",
"format": "int64",
"location": "query"
},
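Review note: the expanded start description explains how to page through serial console output that exceeds the buffer. A sketch of that loop, deriving the next offset from the returned start plus the content length; names are hypothetical:

```go
package sketch

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// Stream serial console output until no new bytes are returned.
func dumpSerialOutput(svc *compute.Service) error {
	var start int64
	for {
		out, err := svc.Instances.GetSerialPortOutput("my-project", "us-central1-f", "my-instance").Start(start).Do()
		if err != nil {
			return err
		}
		fmt.Print(out.Contents)
		next := out.Start + int64(len(out.Contents))
		if next == start { // caught up; nothing new was returned
			return nil
		}
		start = next
	}
}
```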
@@ -12711,11 +13104,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -12760,7 +13152,7 @@
"id": "compute.instances.reset",
"path": "{project}/zones/{zone}/instances/{instance}/reset",
"httpMethod": "POST",
- "description": "Performs a hard reset on the instance.",
+ "description": "Performs a reset on the instance. For more information, see Resetting an instance.",
"parameters": {
"instance": {
"type": "string",
@@ -12985,6 +13377,50 @@
"https://www.googleapis.com/auth/compute"
]
},
+ "setServiceAccount": {
+ "id": "compute.instances.setServiceAccount",
+ "path": "{project}/zones/{zone}/instances/{instance}/setServiceAccount",
+ "httpMethod": "POST",
+ "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.",
+ "parameters": {
+ "instance": {
+ "type": "string",
+ "description": "Name of the instance resource to start.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Project ID for this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "The name of the zone for this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "instance"
+ ],
+ "request": {
+ "$ref": "InstancesSetServiceAccountRequest"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
"setTags": {
"id": "compute.instances.setTags",
"path": "{project}/zones/{zone}/instances/{instance}/setTags",
@@ -13210,11 +13646,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -13302,11 +13737,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -13459,11 +13893,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -13810,11 +14243,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -13863,8 +14295,7 @@
"parameters": {
"autoscaler": {
"type": "string",
- "description": "Name of the autoscaler to update.",
- "required": true,
+ "description": "Name of the autoscaler to patch.",
"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
"location": "query"
},
@@ -13885,8 +14316,7 @@
},
"parameterOrder": [
"project",
- "region",
- "autoscaler"
+ "region"
],
"request": {
"$ref": "Autoscaler"
@@ -13896,7 +14326,8 @@
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute"
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
]
},
"update": {
@@ -14121,11 +14552,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -14174,7 +14604,7 @@
"parameters": {
"backendService": {
"type": "string",
- "description": "Name of the BackendService resource to update.",
+ "description": "Name of the BackendService resource to patch.",
"required": true,
"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
"location": "path"
@@ -14207,7 +14637,8 @@
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute"
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
]
},
"update": {
@@ -14262,7 +14693,7 @@
"id": "compute.regionInstanceGroupManagers.abandonInstances",
"path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances",
"httpMethod": "POST",
- "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.",
+ "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nYou can specify a maximum of 1000 instances with this method per request.",
"parameters": {
"instanceGroupManager": {
"type": "string",
@@ -14343,7 +14774,7 @@
"id": "compute.regionInstanceGroupManagers.deleteInstances",
"path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances",
"httpMethod": "POST",
- "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.",
+ "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nYou can specify a maximum of 1000 instances with this method per request.",
"parameters": {
"instanceGroupManager": {
"type": "string",
@@ -14425,7 +14856,7 @@
"id": "compute.regionInstanceGroupManagers.insert",
"path": "{project}/regions/{region}/instanceGroupManagers",
"httpMethod": "POST",
- "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.",
+ "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances.",
"parameters": {
"project": {
"type": "string",
@@ -14469,11 +14900,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -14534,7 +14964,6 @@
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"order_by": {
@@ -14577,7 +15006,7 @@
"id": "compute.regionInstanceGroupManagers.recreateInstances",
"path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances",
"httpMethod": "POST",
- "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.",
+ "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nYou can specify a maximum of 1000 instances with this method per request.",
"parameters": {
"instanceGroupManager": {
"type": "string",
@@ -14804,11 +15233,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -14867,11 +15295,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -15054,11 +15481,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -15150,11 +15576,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -15204,11 +15629,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -15415,11 +15839,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -15464,7 +15887,7 @@
"id": "compute.routers.patch",
"path": "{project}/regions/{region}/routers/{router}",
"httpMethod": "PATCH",
- "description": "Updates the specified Router resource with the data included in the request. This method supports patch semantics.",
+ "description": "Patches the specified Router resource with the data included in the request. This method supports patch semantics.",
"parameters": {
"project": {
"type": "string",
@@ -15482,7 +15905,7 @@
},
"router": {
"type": "string",
- "description": "Name of the Router resource to update.",
+ "description": "Name of the Router resource to patch.",
"required": true,
"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
"location": "path"
@@ -15501,7 +15924,8 @@
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute"
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
]
},
"preview": {
@@ -15705,11 +16129,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -15826,11 +16249,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -15975,11 +16397,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -16029,11 +16450,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -16242,11 +16662,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -16286,6 +16705,50 @@
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/compute.readonly"
]
+ },
+ "setPrivateIpGoogleAccess": {
+ "id": "compute.subnetworks.setPrivateIpGoogleAccess",
+ "path": "{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess",
+ "httpMethod": "POST",
+ "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Cloudpath.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Project ID for this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "subnetwork": {
+ "type": "string",
+ "description": "Name of the Subnetwork resource.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region",
+ "subnetwork"
+ ],
+ "request": {
+ "$ref": "SubnetworksSetPrivateIpGoogleAccessRequest"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/compute"
+ ]
}
}
},
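The `setPrivateIpGoogleAccess` method added above toggles private Google access on a subnetwork. A minimal Terraform sketch, assuming the provider exposes this as the `private_ip_google_access` argument (the names and CIDR range are illustrative):

```
resource "google_compute_network" "example" {
  name                    = "example-network"
  auto_create_subnetworks = false
}

resource "google_compute_subnetwork" "example" {
  name          = "private-subnet"
  network       = "${google_compute_network.example.self_link}"
  ip_cidr_range = "10.0.0.0/24"
  region        = "us-central1"

  # Lets VMs without external IPs reach Google services; maps to
  # compute.subnetworks.setPrivateIpGoogleAccess.
  private_ip_google_access = true
}
```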
@@ -16399,11 +16862,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -16584,11 +17046,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -16710,11 +17171,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -16879,11 +17339,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -17029,11 +17488,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -17243,11 +17701,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -17538,11 +17995,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -17700,11 +18156,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -17869,11 +18324,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -18062,11 +18516,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -18103,7 +18556,7 @@
"id": "compute.urlMaps.patch",
"path": "{project}/global/urlMaps/{urlMap}",
"httpMethod": "PATCH",
- "description": "Updates the specified UrlMap resource with the data included in the request. This method supports patch semantics.",
+ "description": "Patches the specified UrlMap resource with the data included in the request. This method supports patch semantics.",
"parameters": {
"project": {
"type": "string",
@@ -18114,7 +18567,7 @@
},
"urlMap": {
"type": "string",
- "description": "Name of the UrlMap resource to update.",
+ "description": "Name of the UrlMap resource to patch.",
"required": true,
"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
"location": "path"
@@ -18132,7 +18585,8 @@
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute"
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
]
},
"update": {
@@ -18224,11 +18678,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -18393,11 +18846,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -18535,11 +18987,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
@@ -18631,11 +19082,10 @@
},
"maxResults": {
"type": "integer",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)",
"default": "500",
"format": "uint32",
"minimum": "0",
- "maximum": "500",
"location": "query"
},
"orderBy": {
diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go
index 424e855e2..a2242d45a 100644
--- a/vendor/google.golang.org/api/compute/v1/compute-gen.go
+++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go
@@ -73,6 +73,7 @@ func New(client *http.Client) (*Service, error) {
s := &Service{client: client, BasePath: basePath}
s.Addresses = NewAddressesService(s)
s.Autoscalers = NewAutoscalersService(s)
+ s.BackendBuckets = NewBackendBucketsService(s)
s.BackendServices = NewBackendServicesService(s)
s.DiskTypes = NewDiskTypesService(s)
s.Disks = NewDisksService(s)
@@ -126,6 +127,8 @@ type Service struct {
Autoscalers *AutoscalersService
+ BackendBuckets *BackendBucketsService
+
BackendServices *BackendServicesService
DiskTypes *DiskTypesService
@@ -234,6 +237,15 @@ type AutoscalersService struct {
s *Service
}
+func NewBackendBucketsService(s *Service) *BackendBucketsService {
+ rs := &BackendBucketsService{s: s}
+ return rs
+}
+
+type BackendBucketsService struct {
+ s *Service
+}
+
func NewBackendServicesService(s *Service) *BackendServicesService {
rs := &BackendServicesService{s: s}
return rs
@@ -610,7 +622,9 @@ type AccessConfig struct {
// for access configs.
Kind string `json:"kind,omitempty"`
- // Name: Name of this access configuration.
+ // Name: The name of this access configuration. The default and
+ // recommended name is External NAT but you can use any arbitrary string
+ // you would like. For example, My external IP or Network Access.
Name string `json:"name,omitempty"`
// NatIP: An external IP address associated with this instance. Specify
@@ -881,6 +895,7 @@ type AddressesScopedListWarning struct {
// "NOT_CRITICAL_ERROR"
// "NO_RESULTS_ON_PAGE"
// "REQUIRED_TOS_AGREEMENT"
+ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING"
// "RESOURCE_NOT_DELETED"
// "SINGLE_INSTANCE_PROPERTY_TEMPLATE"
// "UNREACHABLE"
@@ -1419,6 +1434,7 @@ type AutoscalersScopedListWarning struct {
// "NOT_CRITICAL_ERROR"
// "NO_RESULTS_ON_PAGE"
// "REQUIRED_TOS_AGREEMENT"
+ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING"
// "RESOURCE_NOT_DELETED"
// "SINGLE_INSTANCE_PROPERTY_TEMPLATE"
// "UNREACHABLE"
@@ -1858,6 +1874,115 @@ func (s *Backend) UnmarshalJSON(data []byte) error {
return nil
}
+// BackendBucket: A BackendBucket resource. This resource defines a
+// Cloud Storage bucket.
+type BackendBucket struct {
+ // BucketName: Cloud Storage bucket name.
+ BucketName string `json:"bucketName,omitempty"`
+
+ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text
+ // format.
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional textual description of the resource;
+ // provided by the client when the resource is created.
+ Description string `json:"description,omitempty"`
+
+ // EnableCdn: If true, enable Cloud CDN for this BackendBucket.
+ EnableCdn bool `json:"enableCdn,omitempty"`
+
+ // Id: [Output Only] Unique identifier for the resource; defined by the
+ // server.
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: Type of the resource.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: Name of the resource. Provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035. Specifically, the name must be 1-63 characters long and
+ // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means
+ // the first character must be a lowercase letter, and all following
+ // characters must be a dash, lowercase letter, or digit, except the
+ // last character, which cannot be a dash.
+ Name string `json:"name,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for the resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "BucketName") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "BucketName") to include in
+ // API requests with the JSON null value. By default, fields with empty
+ // values are omitted from API requests. However, any field with an
+ // empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *BackendBucket) MarshalJSON() ([]byte, error) {
+ type noMethod BackendBucket
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// BackendBucketList: Contains a list of BackendBucket resources.
+type BackendBucketList struct {
+ // Id: [Output Only] Unique identifier for the resource; defined by the
+ // server.
+ Id string `json:"id,omitempty"`
+
+ // Items: A list of BackendBucket resources.
+ Items []*BackendBucket `json:"items,omitempty"`
+
+ // Kind: Type of resource.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: [Output Only] A token used to continue a truncated
+ // list request.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for this resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Id") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "Id") to include in API
+ // requests with the JSON null value. By default, fields with empty
+ // values are omitted from API requests. However, any field with an
+ // empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *BackendBucketList) MarshalJSON() ([]byte, error) {
+ type noMethod BackendBucketList
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
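The generated `BackendBucket` types above are what the provider's backend-bucket support builds on. A hedged sketch of the corresponding Terraform usage (the resource labels and bucket name are placeholders):

```
resource "google_storage_bucket" "assets" {
  name     = "my-static-assets-bucket"
  location = "US"
}

resource "google_compute_backend_bucket" "assets" {
  name        = "assets-backend"
  bucket_name = "${google_storage_bucket.assets.name}"

  # Corresponds to the EnableCdn field on the generated type.
  enable_cdn = true
}
```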
// BackendService: A BackendService resource. This resource defines a
// group of backend virtual machines and their serving capacity.
type BackendService struct {
@@ -1872,6 +1997,9 @@ type BackendService struct {
// Backends: The list of backends that serve this BackendService.
Backends []*Backend `json:"backends,omitempty"`
+ // CdnPolicy: Cloud CDN configuration for this BackendService.
+ CdnPolicy *BackendServiceCdnPolicy `json:"cdnPolicy,omitempty"`
+
ConnectionDraining *ConnectionDraining `json:"connectionDraining,omitempty"`
// CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text
@@ -1941,7 +2069,7 @@ type BackendService struct {
// Protocol: The protocol this BackendService uses to communicate with
// backends.
//
- // Possible values are HTTP, HTTPS, HTTP2, TCP and SSL. The default is
+ // Possible values are HTTP, HTTPS, TCP, and SSL. The default is
// HTTP.
//
// For internal load balancing, the possible values are TCP and UDP, and
@@ -2062,6 +2190,36 @@ func (s *BackendServiceAggregatedList) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
+// BackendServiceCdnPolicy: Message containing Cloud CDN configuration
+// for a backend service.
+type BackendServiceCdnPolicy struct {
+ // CacheKeyPolicy: The CacheKeyPolicy for this CdnPolicy.
+ CacheKeyPolicy *CacheKeyPolicy `json:"cacheKeyPolicy,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "CacheKeyPolicy") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "CacheKeyPolicy") to
+ // include in API requests with the JSON null value. By default, fields
+ // with empty values are omitted from API requests. However, any field
+ // with an empty value appearing in NullFields will be sent to the
+ // server as null. It is an error if a field in this list has a
+ // non-empty value. This may be used to include null fields in Patch
+ // requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *BackendServiceCdnPolicy) MarshalJSON() ([]byte, error) {
+ type noMethod BackendServiceCdnPolicy
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
type BackendServiceGroupHealth struct {
HealthStatus []*HealthStatus `json:"healthStatus,omitempty"`
@@ -2109,8 +2267,12 @@ type BackendServiceList struct {
// compute#backendServiceList for lists of backend services.
Kind string `json:"kind,omitempty"`
- // NextPageToken: [Output Only] A token used to continue a truncated
- // list request.
+ // NextPageToken: [Output Only] This token allows you to get the next
+ // page of results for list requests. If the number of results is larger
+ // than maxResults, use the nextPageToken as a value for the query
+ // parameter pageToken in the next list request. Subsequent list
+ // requests will have their own nextPageToken to continue paging through
+ // the results.
NextPageToken string `json:"nextPageToken,omitempty"`
// SelfLink: [Output Only] Server-defined URL for this resource.
@@ -2196,6 +2358,7 @@ type BackendServicesScopedListWarning struct {
// "NOT_CRITICAL_ERROR"
// "NO_RESULTS_ON_PAGE"
// "REQUIRED_TOS_AGREEMENT"
+ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING"
// "RESOURCE_NOT_DELETED"
// "SINGLE_INSTANCE_PROPERTY_TEMPLATE"
// "UNREACHABLE"
@@ -2300,6 +2463,59 @@ func (s *CacheInvalidationRule) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
+// CacheKeyPolicy: Message containing what to include in the cache key
+// for a request for Cloud CDN.
+type CacheKeyPolicy struct {
+ // IncludeHost: If true, requests to different hosts will be cached
+ // separately.
+ IncludeHost bool `json:"includeHost,omitempty"`
+
+ // IncludeProtocol: If true, http and https requests will be cached
+ // separately.
+ IncludeProtocol bool `json:"includeProtocol,omitempty"`
+
+ // IncludeQueryString: If true, include query string parameters in the
+ // cache key according to query_string_whitelist and
+ // query_string_blacklist. If neither is set, the entire query string
+ // will be included. If false, the query string will be excluded from
+ // the cache key entirely.
+ IncludeQueryString bool `json:"includeQueryString,omitempty"`
+
+ // QueryStringBlacklist: Names of query string parameters to exclude in
+ // cache keys. All other parameters will be included. Either specify
+ // query_string_whitelist or query_string_blacklist, not both. '&' and
+ // '=' will be percent encoded and not treated as delimiters.
+ QueryStringBlacklist []string `json:"queryStringBlacklist,omitempty"`
+
+ // QueryStringWhitelist: Names of query string parameters to include in
+ // cache keys. All other parameters will be excluded. Either specify
+ // query_string_whitelist or query_string_blacklist, not both. '&' and
+ // '=' will be percent encoded and not treated as delimiters.
+ QueryStringWhitelist []string `json:"queryStringWhitelist,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "IncludeHost") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "IncludeHost") to include
+ // in API requests with the JSON null value. By default, fields with
+ // empty values are omitted from API requests. However, any field with
+ // an empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *CacheKeyPolicy) MarshalJSON() ([]byte, error) {
+ type noMethod CacheKeyPolicy
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
// ConnectionDraining: Message containing connection draining
// configuration.
type ConnectionDraining struct {
@@ -2658,7 +2874,8 @@ type DiskAggregatedList struct {
// than maxResults, use the nextPageToken as a value for the query
// parameter pageToken in the next list request. Subsequent list
// requests will have their own nextPageToken to continue paging through
- // the results.
+ // the results. Acceptable values are 0 to 500, inclusive. (Default:
+ // 500)
NextPageToken string `json:"nextPageToken,omitempty"`
// SelfLink: [Output Only] Server-defined URL for this resource.
@@ -2693,23 +2910,22 @@ func (s *DiskAggregatedList) MarshalJSON() ([]byte, error) {
// DiskList: A list of Disk resources.
type DiskList struct {
- // Id: [Output Only] The unique identifier for the resource. This
- // identifier is defined by the server.
+ // Id: [Output Only] Unique identifier for the resource; defined by the
+ // server.
Id string `json:"id,omitempty"`
- // Items: [Output Only] A list of persistent disks.
+ // Items: A list of Disk resources.
Items []*Disk `json:"items,omitempty"`
// Kind: [Output Only] Type of resource. Always compute#diskList for
// lists of disks.
Kind string `json:"kind,omitempty"`
- // NextPageToken: [Output Only] This token allows you to get the next
- // page of results for list requests. If the number of results is larger
- // than maxResults, use the nextPageToken as a value for the query
- // parameter pageToken in the next list request. Subsequent list
- // requests will have their own nextPageToken to continue paging through
- // the results.
+ // NextPageToken: This token allows you to get the next page of results
+ // for list requests. If the number of results is larger than
+ // maxResults, use the nextPageToken as a value for the query parameter
+ // pageToken in the next list request. Subsequent list requests will
+ // have their own nextPageToken to continue paging through the results.
NextPageToken string `json:"nextPageToken,omitempty"`
// SelfLink: [Output Only] Server-defined URL for this resource.
@@ -3004,6 +3220,7 @@ type DiskTypesScopedListWarning struct {
// "NOT_CRITICAL_ERROR"
// "NO_RESULTS_ON_PAGE"
// "REQUIRED_TOS_AGREEMENT"
+ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING"
// "RESOURCE_NOT_DELETED"
// "SINGLE_INSTANCE_PROPERTY_TEMPLATE"
// "UNREACHABLE"
@@ -3158,6 +3375,7 @@ type DisksScopedListWarning struct {
// "NOT_CRITICAL_ERROR"
// "NO_RESULTS_ON_PAGE"
// "REQUIRED_TOS_AGREEMENT"
+ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING"
// "RESOURCE_NOT_DELETED"
// "SINGLE_INSTANCE_PROPERTY_TEMPLATE"
// "UNREACHABLE"
@@ -3287,7 +3505,7 @@ type Firewall struct {
// will apply to traffic that has source IP address within sourceRanges
// OR the source IP that belongs to a tag listed in the sourceTags
// property. The connection does not need to match both properties for
- // the firewall to apply.
+ // the firewall to apply. Only IPv4 is supported.
SourceRanges []string `json:"sourceRanges,omitempty"`
// SourceTags: If source tags are specified, the firewall will apply
@@ -3430,7 +3648,7 @@ type ForwardingRule struct {
// IPAddress: The IP address that this forwarding rule is serving on
// behalf of.
//
- // For global forwarding rules, the address must be a global IP; for
+ // For global forwarding rules, the address must be a global IP. For
// regional forwarding rules, the address must live in the same region
// as the forwarding rule. By default, this field is empty and an
// ephemeral IP from the same scope (global or regional) will be
@@ -3441,18 +3659,19 @@ type ForwardingRule struct {
// the forwarding rule. A reserved address cannot be used. If the field
// is empty, the IP address will be automatically allocated from the
// internal IP range of the subnetwork or network configured for this
- // forwarding rule.
+ // forwarding rule. Only IPv4 is supported.
IPAddress string `json:"IPAddress,omitempty"`
// IPProtocol: The IP protocol to which this rule applies. Valid options
// are TCP, UDP, ESP, AH, SCTP or ICMP.
//
- // When the load balancing scheme is INTERNAL
+
+ Stack Exchange: Terraform questions often get asked and
+ answered on
+ Server Fault and
+ Stack Overflow. Use the tag
+ "terraform" to help your question be found by Terraform experts, and please
+ be respectful of the "How to Ask" guidelines in each community.
+
Gitter: Terraform Gitter Room
@@ -22,7 +30,7 @@ description: |-
Mailing list: Terraform Google Group
- Bug Tracker: Issue tracker on GitHub. Please only use this for reporting bugs. Do not ask for general help here. Use IRC or the mailing list for that.
+ Bug Tracker: Issue tracker on GitHub. Please only use this for reporting bugs. Do not ask for general help here; use a Stack Exchange community, Gitter chat, or the mailing list for that.
Training: Paid HashiCorp training courses are also available in a city near you. Private training courses are also available.
diff --git a/website/source/docs/commands/get.html.markdown b/website/source/docs/commands/get.html.markdown
index 4666626cb..a7eb32ad6 100644
--- a/website/source/docs/commands/get.html.markdown
+++ b/website/source/docs/commands/get.html.markdown
@@ -9,7 +9,7 @@ description: |-
# Command: get
The `terraform get` command is used to download and update
-[modules](/docs/modules/index.html).
+[modules](/docs/modules/index.html) mentioned in the root module.
## Usage
@@ -28,3 +28,4 @@ The command-line flags are all optional. The list of available flags are:
* `-update` - If specified, modules that are already downloaded will be
checked for updates and the updates will be downloaded if present.
+* `dir` - Sets the path of the [root module](/docs/modules/index.html#definitions).
diff --git a/website/source/docs/configuration/data-sources.html.md b/website/source/docs/configuration/data-sources.html.md
index 1a0138345..6897eb34a 100644
--- a/website/source/docs/configuration/data-sources.html.md
+++ b/website/source/docs/configuration/data-sources.html.md
@@ -74,6 +74,10 @@ resource "aws_instance" "web" {
}
```
+### Meta-parameters
+
+As data sources are essentially a read-only subset of resources, they also support the same [meta-parameters](https://www.terraform.io/docs/configuration/resources.html#meta-parameters) as resources, with the exception of the [`lifecycle` configuration block](https://www.terraform.io/docs/configuration/resources.html#lifecycle).
+
## Multiple Provider Instances
Similarly to [resources](/docs/configuration/resources.html), the
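To illustrate the meta-parameters note above, a brief sketch: `provider` (and the other meta-parameters) work on a data source exactly as on a resource. The alias, owner ID, and filter values here are illustrative:

```
provider "aws" {
  alias  = "west"
  region = "us-west-2"
}

# A data source using the provider meta-parameter to select
# an aliased provider instance.
data "aws_ami" "ubuntu" {
  provider    = "aws.west"
  most_recent = true
  owners      = ["099720109477"]

  filter {
    name   = "name"
    values = ["ubuntu/images/hvm-ssd/*"]
  }
}
```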
diff --git a/website/source/docs/configuration/interpolation.html.md b/website/source/docs/configuration/interpolation.html.md
index aef902d6e..3b855cdee 100644
--- a/website/source/docs/configuration/interpolation.html.md
+++ b/website/source/docs/configuration/interpolation.html.md
@@ -61,6 +61,13 @@ attribute set, you can access individual attributes with a zero-based
index, such as `${aws_instance.web.0.id}`. You can also use the splat
syntax to get a list of all the attributes: `${aws_instance.web.*.id}`.
+#### Attributes of a data source
+
+The syntax is `data.TYPE.NAME.ATTRIBUTE`. For example, `${data.aws_ami.ubuntu.id}` will interpolate the `id` attribute from the `aws_ami` [data source](/docs/configuration/data-sources.html) named `ubuntu`. If the data source has a `count`
+attribute set, you can access individual attributes with a zero-based
+index, such as `${data.aws_subnet.example.0.cidr_block}`. You can also use the splat
+syntax to get a list of all the attributes: `${data.aws_subnet.example.*.cidr_block}`.
+
#### Outputs from a module
The syntax is `MODULE.NAME.OUTPUT`. For example `${module.foo.bar}` will
@@ -151,6 +158,11 @@ The supported built-in functions are:
**This is not equivalent** to `base64encode(sha256(string))`
since `sha256()` returns a hexadecimal representation.
+ * `base64sha512(string)` - Returns a base64-encoded representation of the raw
+ SHA-512 sum of the given string.
+ **This is not equivalent** to `base64encode(sha512(string))`
+ since `sha512()` returns a hexadecimal representation.
+
* `ceil(float)` - Returns the least integer value greater than or equal
to the argument.
@@ -314,6 +326,10 @@ The supported built-in functions are:
SHA-256 hash of the given string.
Example: `"${sha256("${aws_vpc.default.tags.customer}-s3-bucket")}"`
+ * `sha512(string)` - Returns a (conventional) hexadecimal representation of the
+ SHA-512 hash of the given string.
+ Example: `"${sha512("${aws_vpc.default.tags.customer}-s3-bucket")}"`
+
* `signum(int)` - Returns `-1` for negative numbers, `0` for `0` and `1` for positive numbers.
This function is useful when you need to set a value for the first resource and
a different value for the rest of the resources.
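Tying the additions above together (data source attribute access with `count` and splat syntax, plus the new SHA-512 helpers), a hedged sketch in which the subnet IDs are placeholders:

```
variable "subnet_ids" {
  type    = "list"
  default = ["subnet-12345678", "subnet-87654321"]
}

data "aws_subnet" "example" {
  count = 2
  id    = "${element(var.subnet_ids, count.index)}"
}

output "first_cidr" {
  value = "${data.aws_subnet.example.0.cidr_block}"
}

output "all_cidrs" {
  value = ["${data.aws_subnet.example.*.cidr_block}"]
}

output "cidr_digest" {
  # base64sha512() returns the raw SHA-512 sum, base64-encoded;
  # sha512() would return a hexadecimal string instead.
  value = "${base64sha512(join(",", data.aws_subnet.example.*.cidr_block))}"
}
```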
diff --git a/website/source/docs/enterprise/state/collaborating.html.md b/website/source/docs/enterprise/state/collaborating.html.md
index a5e799f47..417887e5a 100755
--- a/website/source/docs/enterprise/state/collaborating.html.md
+++ b/website/source/docs/enterprise/state/collaborating.html.md
@@ -8,7 +8,7 @@ description: |-
# Collaborating on Terraform Remote State
-Terraform Enterprise is one of a few options to store [remote state](/docs/enterprise/state).
+Terraform Enterprise is one of a few options to store [remote state](/docs/state/remote.html).
Remote state gives you the ability to version and collaborate on Terraform
changes. It stores information about the changes Terraform makes based on
@@ -18,6 +18,5 @@ In order to collaborate safely on remote state, we recommend
[creating an organization](/docs/enterprise/organizations/create.html) to
manage teams of users.
-Then, following a [remote state push](/docs/enterprise/state) you can view state
-versions in the changes tab of the environment created under the same name as
-the remote state.
+Then, following a [Terraform Enterprise Run](/docs/enterprise/runs) or [`apply`](/docs/commands/apply.html),
+you can view state versions in the `States` list of the environment.
diff --git a/website/source/docs/enterprise/state/index.html.md b/website/source/docs/enterprise/state/index.html.md
index 047cfee28..3b8a0589b 100755
--- a/website/source/docs/enterprise/state/index.html.md
+++ b/website/source/docs/enterprise/state/index.html.md
@@ -8,17 +8,17 @@ description: |-
# State
-Terraform stores the state of your managed infrastructure from the last time
-Terraform was run. By default this state is stored in a local file named
-`terraform.tfstate`, but it can also be stored remotely, which works better in a
-team environment.
-
-Terraform Enterprise is a remote state provider, allowing you to store, version
-and collaborate on states.
+Terraform Enterprise stores the state of your managed infrastructure from the
+last time Terraform was run. The state is stored remotely, which works better in a
+team environment and allows you to version and collaborate on state.
Remote state gives you more than just easier version control and safer storage.
It also allows you to delegate the outputs to other teams. This allows your
infrastructure to be more easily broken down into components that multiple teams
can access.
-Read [more about remote state](https://www.terraform.io/docs/state/remote.html).
+Remote state is automatically updated when you run [`apply`](/docs/commands/apply.html)
+locally. It is also updated when an `apply` is executed in a [Terraform Enterprise
+Run](/docs/enterprise/runs/index.html).
+
+Read [more about remote state](/docs/state/remote.html).
diff --git a/website/source/docs/enterprise/state/pushing.html.md b/website/source/docs/enterprise/state/pushing.html.md
old mode 100755
new mode 100644
index 4e9545fda..ad058d144
--- a/website/source/docs/enterprise/state/pushing.html.md
+++ b/website/source/docs/enterprise/state/pushing.html.md
@@ -17,7 +17,9 @@ configuration.
To use Terraform Enterprise to store remote state, you'll first need to have the
`ATLAS_TOKEN` environment variable set and run the following command.
+**NOTE:** The `terraform remote config` command has been deprecated in 0.9.x. Remote configuration is now managed as a [backend configuration](/docs/backends/config.html).
+
```shell
$ terraform remote config \
-backend-config="name=$USERNAME/product"
-```
+```
\ No newline at end of file
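+
+For comparison, a minimal sketch of the equivalent 0.9-style [backend configuration](/docs/backends/config.html) (assuming the `atlas` backend; substitute your own `USERNAME/product` name):
+
+```hcl
+terraform {
+  backend "atlas" {
+    name = "USERNAME/product"
+  }
+}
+```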
diff --git a/website/source/docs/enterprise/state/resolving-conflicts.html.md b/website/source/docs/enterprise/state/resolving-conflicts.html.md
index 0de4069d6..e31a7fafa 100755
--- a/website/source/docs/enterprise/state/resolving-conflicts.html.md
+++ b/website/source/docs/enterprise/state/resolving-conflicts.html.md
@@ -35,36 +35,22 @@ operation.
### Using Terraform Locally
-Another way to resolve remote state conflicts is to merge and conflicted copies
-locally by inspecting the raw state available in the path
-`.terraform/terraform.tfstate`.
+Another way to resolve remote state conflicts is to manually edit the
+state file.
-When making state changes, it's important to make backup copies in order to
-avoid losing any data.
+Use the [`state pull`](/docs/commands/state/pull.html) subcommand to pull the
+remote state into a local state file.
-Any state that is pushed with a serial that is lower than the known serial when
-the MD5 of the state does not match will be rejected.
-
-The serial is embedded in the state file:
-
-```json
-{
- "version": 1,
- "serial": 555,
- "remote": {
- "type": "atlas",
- "config": {
- "name": "my-username/production"
- }
- }
-}
+```shell
+$ terraform state pull > example.tfstate
```
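+
+The `serial` is a top-level field in the pulled state file; a trimmed sketch of
+`example.tfstate` (other fields omitted, values illustrative):
+
+```json
+{
+  "version": 3,
+  "serial": 556,
+  "modules": []
+}
+```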
Once a conflict has been resolved locally by editing the state file, the serial
-can be incremented past the current version and pushed:
+can be incremented past the current version and pushed with the
+[`state push`](/docs/commands/state/push.html) subcommand:
```shell
-$ terraform remote push
+$ terraform state push example.tfstate
```
This will upload the manually resolved state and set it as the head version.
diff --git a/website/source/docs/import/importability.html.md b/website/source/docs/import/importability.html.md
index b6b22e609..628881886 100644
--- a/website/source/docs/import/importability.html.md
+++ b/website/source/docs/import/importability.html.md
@@ -150,6 +150,7 @@ To make a resource importable, please see the
* google_compute_instance_group_manager
* google_compute_instance_template
* google_compute_target_pool
+* google_dns_managed_zone
* google_project
### OpenStack
diff --git a/website/source/docs/modules/index.html.markdown b/website/source/docs/modules/index.html.markdown
index cacccf847..96db65043 100644
--- a/website/source/docs/modules/index.html.markdown
+++ b/website/source/docs/modules/index.html.markdown
@@ -15,3 +15,8 @@ in Terraform as well as for basic code organization.
Modules are very easy to both use and create. Depending on what you're
looking to do first, use the navigation on the left to dive into how
modules work.
+
+## Definitions
+**Root module**
+The root module is the current working directory when you run [`terraform apply`](/docs/commands/apply.html) or [`get`](/docs/commands/get.html), holding the Terraform [configuration files](/docs/configuration/index.html).
+It is itself a valid module.
diff --git a/website/source/docs/providers/aws/d/efs_file_system.html.markdown b/website/source/docs/providers/aws/d/efs_file_system.html.markdown
new file mode 100644
index 000000000..4441e01eb
--- /dev/null
+++ b/website/source/docs/providers/aws/d/efs_file_system.html.markdown
@@ -0,0 +1,39 @@
+---
+layout: "aws"
+page_title: "AWS: efs_file_system"
+sidebar_current: "docs-aws-datasource-efs-file-system"
+description: |-
+ Provides an Elastic File System (EFS) data source.
+---
+
+# aws_efs_file_system
+
+Provides information about an Elastic File System (EFS).
+
+## Example Usage
+
+```hcl
+variable "file_system_id" {
+ type = "string"
+ default = ""
+}
+
+data "aws_efs_file_system" "by_id" {
+ file_system_id = "${var.file_system_id}"
+}
+```
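+
+A lookup by creation token follows the same shape (a sketch; the token value is illustrative):
+
+```hcl
+data "aws_efs_file_system" "by_token" {
+  creation_token = "my-efs-token"
+}
+```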
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `file_system_id` - (Optional) The ID that identifies the file system (e.g. fs-ccfc0d65).
+* `creation_token` - (Optional) Restricts the lookup to the file system with this creation token.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `performance_mode` - The PerformanceMode of the file system.
+* `tags` - The list of tags assigned to the file system.
+
diff --git a/website/source/docs/providers/aws/d/subnet_ids.html.markdown b/website/source/docs/providers/aws/d/subnet_ids.html.markdown
index 871da0ca2..000161258 100644
--- a/website/source/docs/providers/aws/d/subnet_ids.html.markdown
+++ b/website/source/docs/providers/aws/d/subnet_ids.html.markdown
@@ -23,7 +23,7 @@ data "aws_subnet_ids" "example" {
data "aws_subnet" "example" {
count = "${length(data.aws_subnet_ids.example.ids)}"
- id = "${aws_subnet_ids.example.ids[count.index]}"
+ id = "${data.aws_subnet_ids.example.ids[count.index]}"
}
output "subnet_cidr_blocks" {
diff --git a/website/source/docs/providers/aws/r/db_instance.html.markdown b/website/source/docs/providers/aws/r/db_instance.html.markdown
index 72b2ab1ef..880dbe854 100644
--- a/website/source/docs/providers/aws/r/db_instance.html.markdown
+++ b/website/source/docs/providers/aws/r/db_instance.html.markdown
@@ -112,6 +112,7 @@ what IAM permissions are needed to allow Enhanced Monitoring for RDS Instances.
* `kms_key_id` - (Optional) The ARN for the KMS encryption key.
* `character_set_name` - (Optional) The character set name to use for DB encoding in Oracle instances. This can't be changed.
[Oracle Character Sets Supported in Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.OracleCharacterSets.html)
+* `iam_database_authentication_enabled` - (Optional) Specifies whether mappings of AWS Identity and Access Management (IAM) accounts to database accounts are enabled.
* `tags` - (Optional) A mapping of tags to assign to the resource.
* `timezone` - (Optional) Time zone of the DB instance. `timezone` is currently only supported by Microsoft SQL Server.
The `timezone` can only be set on creation. See [MSSQL User Guide](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.TimeZone) for more information
diff --git a/website/source/docs/providers/aws/r/dynamodb_table.html.markdown b/website/source/docs/providers/aws/r/dynamodb_table.html.markdown
index b3cd64cf4..3a07f70b8 100644
--- a/website/source/docs/providers/aws/r/dynamodb_table.html.markdown
+++ b/website/source/docs/providers/aws/r/dynamodb_table.html.markdown
@@ -38,6 +38,11 @@ resource "aws_dynamodb_table" "basic-dynamodb-table" {
type = "N"
}
+ ttl {
+ attribute_name = "TimeToExist"
+ enabled = false
+ }
+
global_secondary_index {
name = "GameTitleIndex"
hash_key = "GameTitle"
@@ -72,6 +77,7 @@ The following arguments are supported:
* `type` - One of: S, N, or B for (S)tring, (N)umber or (B)inary data
* `stream_enabled` - (Optional) Indicates whether Streams are to be enabled (true) or disabled (false).
* `stream_view_type` - (Optional) When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values are KEYS_ONLY, NEW_IMAGE, OLD_IMAGE, NEW_AND_OLD_IMAGES.
+* `ttl` - (Optional) Indicates whether Time To Live is enabled (true) or disabled (false) and specifies the `attribute_name` to be used.
* `local_secondary_index` - (Optional, Forces new resource) Describe an LSI on the table;
these can only be allocated *at creation* so you cannot change this
definition after you have created the resource.
diff --git a/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown b/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown
index ef27d6d84..87591ba5f 100644
--- a/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown
+++ b/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown
@@ -50,7 +50,7 @@ The following arguments are supported:
off of. Example stacks can be found in the [Amazon API documentation][1]
* `template_name` – (Optional) The name of the Elastic Beanstalk Configuration
template to use in deployment
-* `wait_for_ready_timeout` - (Default: `10m`) The maximum
+* `wait_for_ready_timeout` - (Default: `20m`) The maximum
[duration](https://golang.org/pkg/time/#ParseDuration) that Terraform should
wait for an Elastic Beanstalk Environment to be in a ready state before timing
out.
diff --git a/website/source/docs/providers/aws/r/emr_cluster.html.md b/website/source/docs/providers/aws/r/emr_cluster.html.md
index cca374e58..dd2beb0ff 100644
--- a/website/source/docs/providers/aws/r/emr_cluster.html.md
+++ b/website/source/docs/providers/aws/r/emr_cluster.html.md
@@ -67,6 +67,7 @@ The following arguments are supported:
* `release_label` - (Required) The release label for the Amazon EMR release
* `master_instance_type` - (Required) The EC2 instance type of the master node
* `service_role` - (Required) IAM role that will be assumed by the Amazon EMR service to access AWS resources
+* `security_configuration` - (Optional) The security configuration name to attach to the EMR cluster. Only valid for EMR clusters with `release_label` 4.8.0 or greater
* `core_instance_type` - (Optional) The EC2 instance type of the slave nodes
* `core_instance_count` - (Optional) Number of Amazon EC2 instances used to execute the job flow. EMR will use one node as the cluster's master node and use the remainder of the nodes (`core_instance_count`-1) as core nodes. Default `1`
* `log_uri` - (Optional) S3 bucket to write the log files of the job flow. If a value
diff --git a/website/source/docs/providers/aws/r/emr_security_configuration.html.markdown b/website/source/docs/providers/aws/r/emr_security_configuration.html.markdown
new file mode 100644
index 000000000..54717817f
--- /dev/null
+++ b/website/source/docs/providers/aws/r/emr_security_configuration.html.markdown
@@ -0,0 +1,63 @@
+---
+layout: "aws"
+page_title: "AWS: aws_emr_security_configuraiton"
+sidebar_current: "docs-aws-resource-emr-security-configuration"
+description: |-
+ Provides a resource to manage AWS EMR Security Configurations
+---
+
+# aws\_emr\_security\_configuration
+
+Provides a resource to manage AWS EMR Security Configurations
+
+## Example Usage
+
+```hcl
+resource "aws_emr_security_configuration" "foo" {
+ name = "emrsc_other"
+
+  configuration = <<EOF
+{
+  "EncryptionConfiguration": {
+    "AtRestEncryptionConfiguration": {
+      "S3EncryptionConfiguration": {
+        "EncryptionMode": "SSE-S3"
+      },
+      "LocalDiskEncryptionConfiguration": {
+        "EncryptionKeyProviderType": "AwsKms",
+        "AwsKmsKey": "arn:aws:kms:us-west-2:187416307283:alias/tf_emr_test_key"
+      }
+    },
+    "EnableInTransitEncryption": false,
+    "EnableAtRestEncryption": true
+  }
+}
+EOF
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Optional) The name of the EMR Security Configuration. By default generated by Terraform.
+* `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`.
+* `configuration` - (Required) A JSON formatted Security Configuration
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The ID of the EMR Security Configuration (same as the `name`)
+* `name` - The name of the EMR Security Configuration
+* `configuration` - The JSON formatted Security Configuration
+* `creation_date` - Date the Security Configuration was created
diff --git a/website/source/docs/providers/azurerm/r/sql_elasticpool.html.markdown b/website/source/docs/providers/azurerm/r/sql_elasticpool.html.markdown
new file mode 100644
--- /dev/null
+++ b/website/source/docs/providers/azurerm/r/sql_elasticpool.html.markdown
+---
+layout: "azurerm"
+page_title: "Azure Resource Manager: azurerm_sql_elasticpool"
+sidebar_current: "docs-azurerm-resource-sql-elasticpool"
+description: |-
+  Allows you to manage an Azure SQL Elastic Pool.
+---
+
+# azurerm\_sql\_elasticpool
+
+Allows you to manage an Azure SQL Elastic Pool.
+
+~> **NOTE on `azurerm_sql_elasticpool`:** The values of `edition`, `dtu`, and `pool_size` must be consistent with the [Azure SQL Database Service Tiers](https://docs.microsoft.com/en-gb/azure/sql-database/sql-database-service-tiers#elastic-pool-service-tiers-and-performance-in-edtus). Any inconsistent argument configuration will be rejected.
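+
+## Example Usage
+
+A minimal sketch of an elastic pool on an existing SQL server (names, location
+and DTU values are illustrative):
+
+```hcl
+resource "azurerm_resource_group" "test" {
+  name     = "my-resource-group"
+  location = "West US"
+}
+
+resource "azurerm_sql_server" "test" {
+  name                         = "my-sql-server" # must be globally unique
+  resource_group_name          = "${azurerm_resource_group.test.name}"
+  location                     = "${azurerm_resource_group.test.location}"
+  version                      = "12.0"
+  administrator_login          = "sqladmin"
+  administrator_login_password = "ChangeMe-1234"
+}
+
+resource "azurerm_sql_elasticpool" "test" {
+  name                = "test-pool"
+  resource_group_name = "${azurerm_resource_group.test.name}"
+  location            = "${azurerm_resource_group.test.location}"
+  server_name         = "${azurerm_sql_server.test.name}"
+  edition             = "Basic"
+  dtu                 = 50
+  db_dtu_min          = 0
+  db_dtu_max          = 5
+  pool_size           = 5000
+}
+```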
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the elastic pool.
+
+* `resource_group_name` - (Required) The name of the resource group in which to create the elastic pool. This must be the same as the resource group of the underlying SQL server.
+
+* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
+
+* `server_name` - (Required) The name of the SQL Server on which to create the elastic pool. Changing this forces a new resource to be created.
+
+* `edition` - (Required) The edition of the elastic pool to be created. Valid values are `Basic`, `Standard`, and `Premium`. Refer to [Azure SQL Database Service Tiers](https://docs.microsoft.com/en-gb/azure/sql-database/sql-database-service-tiers#elastic-pool-service-tiers-and-performance-in-edtus) for details. Changing this forces a new resource to be created.
+
+* `dtu` - (Required) The total shared DTU for the elastic pool. Valid values depend on the `edition` which has been defined. Refer to [Azure SQL Database Service Tiers](https://docs.microsoft.com/en-gb/azure/sql-database/sql-database-service-tiers#elastic-pool-service-tiers-and-performance-in-edtus) for valid combinations.
+
+* `db_dtu_min` - (Optional) The minimum DTU which will be guaranteed to all databases in the elastic pool.
+
+* `db_dtu_max` - (Optional) The maximum DTU which any single database in the elastic pool can consume.
+
+* `pool_size` - (Optional) The maximum size in MB that all databases in the elastic pool can grow to. The maximum size must be consistent with combination of `edition` and `dtu` and the limits documented in [Azure SQL Database Service Tiers](https://docs.microsoft.com/en-gb/azure/sql-database/sql-database-service-tiers#elastic-pool-service-tiers-and-performance-in-edtus). If not defined when creating an elastic pool, the value is set to the size implied by `edition` and `dtu`.
+
+* `tags` - (Optional) A mapping of tags to assign to the resource.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The SQL Elastic Pool ID.
+
+* `creation_date` - The creation date of the SQL Elastic Pool.
diff --git a/website/source/docs/providers/azurerm/r/template_deployment.html.markdown b/website/source/docs/providers/azurerm/r/template_deployment.html.markdown
index 5dbc67762..825e16d86 100644
--- a/website/source/docs/providers/azurerm/r/template_deployment.html.markdown
+++ b/website/source/docs/providers/azurerm/r/template_deployment.html.markdown
@@ -98,6 +98,7 @@ The following arguments are supported:
The following attributes are exported:
* `id` - The Template Deployment ID.
+* `outputs` - A map of supported scalar output types returned from the deployment. Currently, Azure Template Deployment outputs of type String, Int and Bool are supported and are converted to strings; outputs of other types are ignored.
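+
+For example, a String output named `storageAccountName` (resource and output names illustrative) could be referenced as `"${azurerm_template_deployment.test.outputs["storageAccountName"]}"`.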
## Note
diff --git a/website/source/docs/providers/circonus/r/graph.html.markdown b/website/source/docs/providers/circonus/r/graph.html.markdown
index 47252c46f..47d2b5b80 100644
--- a/website/source/docs/providers/circonus/r/graph.html.markdown
+++ b/website/source/docs/providers/circonus/r/graph.html.markdown
@@ -170,7 +170,7 @@ resource "circonus_graph" "icmp-graph" {
It is possible to import a `circonus_graph` resource with the following command:
```
-$ terraform import circonus_graph.usage ID
+$ terraform import circonus_graph.icmp-graph ID
```
Where `ID` is the `_cid` or Circonus ID of the graph
diff --git a/website/source/docs/providers/circonus/r/rule_set.html.markdown b/website/source/docs/providers/circonus/r/rule_set.html.markdown
index ef4ba70d1..e07bba5fd 100644
--- a/website/source/docs/providers/circonus/r/rule_set.html.markdown
+++ b/website/source/docs/providers/circonus/r/rule_set.html.markdown
@@ -368,7 +368,7 @@ resource "circonus_rule_set" "icmp-latency-alert" {
It is possible to import a `circonus_rule_set` resource with the following command:
```
-$ terraform import circonus_rule_set.usage ID
+$ terraform import circonus_rule_set.icmp-latency-alert ID
```
Where `ID` is the `_cid` or Circonus ID of the Rule Set
diff --git a/website/source/docs/providers/fastly/r/service_v1.html.markdown b/website/source/docs/providers/fastly/r/service_v1.html.markdown
index 9d0ba0035..2f0a7cb2e 100644
--- a/website/source/docs/providers/fastly/r/service_v1.html.markdown
+++ b/website/source/docs/providers/fastly/r/service_v1.html.markdown
@@ -209,7 +209,7 @@ The `cache_setting` block supports:
* `name` - (Required) Unique name for this Cache Setting.
* `action` - (Required) One of `cache`, `pass`, or `restart`, as defined
on Fastly's documentation under ["Caching action descriptions"](https://docs.fastly.com/guides/performance-tuning/controlling-caching#caching-action-descriptions).
-* `cache_condition` - (Required) Name of already defined `condition` used to test whether this settings object should be used. This `condition` must be of type `CACHE`.
+* `cache_condition` - (Optional) Name of already defined `condition` used to test whether this settings object should be used. This `condition` must be of type `CACHE`.
* `stale_ttl` - (Optional) Max "Time To Live" for stale (unreachable) objects.
Default `300`.
* `ttl` - (Optional) The Time-To-Live (TTL) for the object.
@@ -263,7 +263,7 @@ The `request_setting` block allow you to customize Fastly's request handling, by
defining behavior that should change based on a predefined `condition`:
* `name` - (Required) The domain for this request setting.
-* `request_condition` - (Required) Name of already defined `condition` to
+* `request_condition` - (Optional) Name of already defined `condition` to
determine if this request setting should be applied.
* `max_stale_age` - (Optional) How old an object is allowed to be to serve
`stale-if-error` or `stale-while-revalidate`, in seconds. Default `60`.
diff --git a/website/source/docs/providers/github/r/repository.html.markdown b/website/source/docs/providers/github/r/repository.html.markdown
index 23fbd0416..a2b50c828 100644
--- a/website/source/docs/providers/github/r/repository.html.markdown
+++ b/website/source/docs/providers/github/r/repository.html.markdown
@@ -69,3 +69,12 @@ The following additional attributes are exported:
* `svn_url` - URL that can be provided to `svn checkout` to check out
the repository via Github's Subversion protocol emulation.
+
+
+## Import
+
+Repositories can be imported using the `name`, e.g.
+
+```
+$ terraform import github_repository.terraform terraform
+```
diff --git a/website/source/docs/providers/google/index.html.markdown b/website/source/docs/providers/google/index.html.markdown
index d0eecd113..a04f183d4 100644
--- a/website/source/docs/providers/google/index.html.markdown
+++ b/website/source/docs/providers/google/index.html.markdown
@@ -46,6 +46,10 @@ The following keys can be used to configure the provider.
* `GOOGLE_CLOUD_KEYFILE_JSON`
* `GCLOUD_KEYFILE_JSON`
+  The [`GOOGLE_APPLICATION_CREDENTIALS`](https://developers.google.com/identity/protocols/application-default-credentials#howtheywork)
+  environment variable can also be set to the path of a file from which to
+  obtain credentials.
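+
+  For example: `export GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json`.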
+
* `project` - (Required) The ID of the project to apply any resources to. This
can be specified using any of the following environment variables (listed in
order of precedence):
diff --git a/website/source/docs/providers/google/r/compute_backend_bucket.html.markdown b/website/source/docs/providers/google/r/compute_backend_bucket.html.markdown
new file mode 100644
index 000000000..79f1de970
--- /dev/null
+++ b/website/source/docs/providers/google/r/compute_backend_bucket.html.markdown
@@ -0,0 +1,52 @@
+---
+layout: "google"
+page_title: "Google: google_compute_backend_bucket"
+sidebar_current: "docs-google-compute-backend-bucket"
+description: |-
+ Creates a Backend Bucket resource for Google Compute Engine.
+---
+
+# google\_compute\_backend\_bucket
+
+A Backend Bucket defines a Google Cloud Storage bucket that will serve traffic through Google Cloud
+Load Balancer.
+
+## Example Usage
+
+```hcl
+resource "google_compute_backend_bucket" "foobar" {
+ name = "image-backend-bucket"
+ description = "Contains beautiful images"
+ bucket_name = "${google_storage_bucket.image_bucket.name}"
+ enable_cdn = true
+}
+
+resource "google_storage_bucket" "image_bucket" {
+ name = "image-store-bucket"
+ location = "EU"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the backend bucket.
+
+* `bucket_name` - (Required) The name of the Google Cloud Storage bucket to be used as a backend
+ bucket.
+
+- - -
+
+* `description` - (Optional) The textual description for the backend bucket.
+
+* `enable_cdn` - (Optional) Whether or not to enable the Cloud CDN on the backend bucket.
+
+* `project` - (Optional) The project in which the resource belongs. If it is not provided, the
+ provider project is used.
+
+## Attributes Reference
+
+In addition to the arguments listed above, the following computed attributes are exported:
+
+* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_global_forwarding_rule.html.markdown b/website/source/docs/providers/google/r/compute_global_forwarding_rule.html.markdown
index df04a6935..bc5607da0 100644
--- a/website/source/docs/providers/google/r/compute_global_forwarding_rule.html.markdown
+++ b/website/source/docs/providers/google/r/compute_global_forwarding_rule.html.markdown
@@ -80,7 +80,9 @@ The following arguments are supported:
* `description` - (Optional) Textual description field.
* `ip_address` - (Optional) The static IP. (if not set, an ephemeral IP is
- used).
+ used). This should be the literal IP address to be used, not the `self_link`
+ to a `google_compute_address` resource. (If using a `google_compute_address`
+ resource, use the `address` property instead of the `self_link` property.)
* `ip_protocol` - (Optional) The IP protocol to route, one of "TCP" "UDP" "AH"
"ESP" or "SCTP". (default "TCP").
diff --git a/website/source/docs/providers/google/r/compute_snapshot.html.markdown b/website/source/docs/providers/google/r/compute_snapshot.html.markdown
new file mode 100644
index 000000000..cdeb4fea9
--- /dev/null
+++ b/website/source/docs/providers/google/r/compute_snapshot.html.markdown
@@ -0,0 +1,66 @@
+---
+layout: "google"
+page_title: "Google: google_compute_snapshot"
+sidebar_current: "docs-google-compute-snapshot"
+description: |-
+ Creates a new snapshot of a disk within GCE.
+---
+
+# google\_compute\_snapshot
+
+Creates a new snapshot of a disk within GCE.
+
+## Example Usage
+
+```hcl
+resource "google_compute_snapshot" "default" {
+ name = "test-snapshot"
+ source_disk = "test-disk"
+ zone = "us-central1-a"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) A unique name for the resource, required by GCE.
+ Changing this forces a new resource to be created.
+
+* `zone` - (Required) The zone where the source disk is located.
+
+* `source_disk` - (Required) The disk which will be used as the source of the snapshot.
+
+- - -
+
+* `source_disk_encryption_key_raw` - (Optional) A 256-bit [customer-supplied
+  encryption key](https://cloud.google.com/compute/docs/disks/customer-supplied-encryption),
+  encoded in [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4),
+  to decrypt the source disk.
+
+* `snapshot_encryption_key_raw` - (Optional) A 256-bit [customer-supplied
+  encryption key](https://cloud.google.com/compute/docs/disks/customer-supplied-encryption),
+  encoded in [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4),
+  to encrypt this snapshot.
+
+* `project` - (Optional) The project in which the resource belongs. If it
+ is not provided, the provider project is used.
+
+## Attributes Reference
+
+In addition to the arguments listed above, the following computed attributes are
+exported:
+
+* `snapshot_encryption_key_sha256` - The [RFC 4648
+  base64](https://tools.ietf.org/html/rfc4648#section-4) encoded SHA-256 hash of the
+  [customer-supplied encryption key](https://cloud.google.com/compute/docs/disks/customer-supplied-encryption)
+  that protects this resource.
+
+* `source_disk_encryption_key_sha256` - The [RFC 4648
+  base64](https://tools.ietf.org/html/rfc4648#section-4) encoded SHA-256 hash of the
+  [customer-supplied encryption key](https://cloud.google.com/compute/docs/disks/customer-supplied-encryption)
+  that protects the source disk.
+
+* `source_disk_link` - The URI of the source disk.
+
+* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_url_map.html.markdown b/website/source/docs/providers/google/r/compute_url_map.html.markdown
index faad2a1ee..f28e83816 100644
--- a/website/source/docs/providers/google/r/compute_url_map.html.markdown
+++ b/website/source/docs/providers/google/r/compute_url_map.html.markdown
@@ -41,6 +41,11 @@ resource "google_compute_url_map" "foobar" {
paths = ["/login"]
service = "${google_compute_backend_service.login.self_link}"
}
+
+ path_rule {
+ paths = ["/static"]
+ service = "${google_compute_backend_bucket.static.self_link}"
+ }
}
test {
@@ -55,7 +60,6 @@ resource "google_compute_backend_service" "login" {
port_name = "http"
protocol = "HTTP"
timeout_sec = 10
- region = "us-central1"
health_checks = ["${google_compute_http_health_check.default.self_link}"]
}
@@ -65,7 +69,6 @@ resource "google_compute_backend_service" "home" {
port_name = "http"
protocol = "HTTP"
timeout_sec = 10
- region = "us-central1"
health_checks = ["${google_compute_http_health_check.default.self_link}"]
}
@@ -76,14 +79,25 @@ resource "google_compute_http_health_check" "default" {
check_interval_sec = 1
timeout_sec = 1
}
+
+resource "google_compute_backend_bucket" "static" {
+ name = "static-asset-backend-bucket"
+ bucket_name = "${google_storage_bucket.static.name}"
+ enable_cdn = true
+}
+
+resource "google_storage_bucket" "static" {
+ name = "static-asset-bucket"
+ location = "US"
+}
```
## Argument Reference
The following arguments are supported:
-* `default_service` - (Required) The URL of the backend service to use when none
- of the given rules match. See the documentation for formatting the service
+* `default_service` - (Required) The URL of the backend service or backend bucket to use when none
+ of the given rules match. See the documentation for formatting the service/bucket
URL
[here](https://cloud.google.com/compute/docs/reference/latest/urlMaps#defaultService)
@@ -118,8 +132,8 @@ The `host_rule` block supports: (This block can be defined multiple times).
The `path_matcher` block supports: (This block can be defined multiple times)
-* `default_service` - (Required) The URL for the backend service to use if none
- of the given paths match. See the documentation for formatting the service
+* `default_service` - (Required) The URL for the backend service or backend bucket to use if none
+ of the given paths match. See the documentation for formatting the service/bucket
URL [here](https://cloud.google.com/compute/docs/reference/latest/urlMaps#pathMatcher.defaultService)
* `name` - (Required) The name of the `path_matcher` resource. Used by the
@@ -133,13 +147,13 @@ multiple times)
* `paths` - (Required) The list of paths to match against. See the
documentation for formatting these [here](https://cloud.google.com/compute/docs/reference/latest/urlMaps#pathMatchers.pathRules.paths)
-* `default_service` - (Required) The URL for the backend service to use if any
- of the given paths match. See the documentation for formatting the service
+* `service` - (Required) The URL for the backend service or backend bucket to use if any
+ of the given paths match. See the documentation for formatting the service/bucket
URL [here](https://cloud.google.com/compute/docs/reference/latest/urlMaps#pathMatcher.defaultService)
The optional `test` block supports: (This block can be defined multiple times)
-* `service` - (Required) The service that should be matched by this test.
+* `service` - (Required) The backend service or backend bucket that should be matched by this test.
* `host` - (Required) The host component of the URL being tested.
diff --git a/website/source/docs/providers/heroku/r/app.html.markdown b/website/source/docs/providers/heroku/r/app.html.markdown
index 0cf064e4f..9407eb779 100644
--- a/website/source/docs/providers/heroku/r/app.html.markdown
+++ b/website/source/docs/providers/heroku/r/app.html.markdown
@@ -1,7 +1,7 @@
---
layout: "heroku"
page_title: "Heroku: heroku_app"
-sidebar_current: "docs-heroku-resource-app"
+sidebar_current: "docs-heroku-resource-app-x"
description: |-
Provides a Heroku App resource. This can be used to create and manage applications on Heroku.
---
diff --git a/website/source/docs/providers/heroku/r/app_feature.html.markdown b/website/source/docs/providers/heroku/r/app_feature.html.markdown
new file mode 100644
index 000000000..c532b47d7
--- /dev/null
+++ b/website/source/docs/providers/heroku/r/app_feature.html.markdown
@@ -0,0 +1,28 @@
+---
+layout: "heroku"
+page_title: "Heroku: heroku_app_feature"
+sidebar_current: "docs-heroku-resource-app-feature"
+description: |-
+ Provides a Heroku App Feature resource. This can be used to create and manage App Features on Heroku.
+---
+
+# heroku\_app\_feature
+
+Provides a Heroku App Feature resource. This can be used to create and manage App Features on Heroku.
+
+## Example Usage
+
+```hcl
+resource "heroku_app_feature" "log_runtime_metrics" {
+ app = "test-app"
+ name = "log-runtime-metrics"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `app` - (Required) The Heroku app to link to.
+* `name` - (Required) The name of the App Feature to manage.
+* `enabled` - (Optional) Whether to enable or disable the App Feature. The default value is true.
diff --git a/website/source/docs/providers/heroku/r/pipeline.html.markdown b/website/source/docs/providers/heroku/r/pipeline.html.markdown
new file mode 100644
index 000000000..dcd38ab60
--- /dev/null
+++ b/website/source/docs/providers/heroku/r/pipeline.html.markdown
@@ -0,0 +1,62 @@
+---
+layout: "heroku"
+page_title: "Heroku: heroku_pipeline_"
+sidebar_current: "docs-heroku-resource-pipeline-x"
+description: |-
+ Provides a Heroku Pipeline resource.
+---
+
+# heroku\_pipeline
+
+Provides a [Heroku Pipeline](https://devcenter.heroku.com/articles/pipelines)
+resource.
+
+A pipeline is a group of Heroku apps that share the same codebase. Once a
+pipeline is created, and apps are added to different stages using
+[`heroku_pipeline_coupling`](./pipeline_coupling.html), you can promote app
+slugs to the next stage.
+
+## Example Usage
+
+```hcl
+# Create Heroku apps for staging and production
+resource "heroku_app" "staging" {
+ name = "test-app-staging"
+}
+
+resource "heroku_app" "production" {
+ name = "test-app-production"
+}
+
+# Create a Heroku pipeline
+resource "heroku_pipeline" "test-app" {
+ name = "test-app"
+}
+
+# Couple apps to different pipeline stages
+resource "heroku_pipeline_coupling" "staging" {
+ app = "${heroku_app.staging.name}"
+ pipeline = "${heroku_pipeline.test-app.id}"
+ stage = "staging"
+}
+
+resource "heroku_pipeline_coupling" "production" {
+ app = "${heroku_app.production.name}"
+ pipeline = "${heroku_pipeline.test-app.id}"
+ stage = "production"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the pipeline.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The UUID of the pipeline.
+* `name` - The name of the pipeline.
diff --git a/website/source/docs/providers/heroku/r/pipeline_coupling.html.markdown b/website/source/docs/providers/heroku/r/pipeline_coupling.html.markdown
new file mode 100644
index 000000000..90a5a9b50
--- /dev/null
+++ b/website/source/docs/providers/heroku/r/pipeline_coupling.html.markdown
@@ -0,0 +1,67 @@
+---
+layout: "heroku"
+page_title: "Heroku: heroku_pipeline_coupling"
+sidebar_current: "docs-heroku-resource-pipeline-coupling"
+description: |-
+ Provides a Heroku Pipeline Coupling resource.
+---
+
+# heroku\_pipeline\_coupling
+
+Provides a [Heroku Pipeline Coupling](https://devcenter.heroku.com/articles/pipelines)
+resource.
+
+A pipeline is a group of Heroku apps that share the same codebase. Once a
+pipeline is created using [`heroku_pipeline`](./pipeline.html), and apps are added
+to different stages using `heroku_pipeline_coupling`, you can promote app slugs
+to the downstream stages.
+
+## Example Usage
+
+```hcl
+# Create Heroku apps for staging and production
+resource "heroku_app" "staging" {
+ name = "test-app-staging"
+}
+
+resource "heroku_app" "production" {
+ name = "test-app-production"
+}
+
+# Create a Heroku pipeline
+resource "heroku_pipeline" "test-app" {
+ name = "test-app"
+}
+
+# Couple apps to different pipeline stages
+resource "heroku_pipeline_coupling" "staging" {
+ app = "${heroku_app.staging.name}"
+ pipeline = "${heroku_pipeline.test-app.id}"
+ stage = "staging"
+}
+
+resource "heroku_pipeline_coupling" "production" {
+ app = "${heroku_app.production.name}"
+ pipeline = "${heroku_pipeline.test-app.id}"
+ stage = "production"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `app` - (Required) The name of the app for this coupling.
+* `pipeline` - (Required) The ID of the pipeline to add this app to.
+* `stage` - (Required) The stage to couple this app to. Must be one of
+  `review`, `development`, `staging`, or `production`.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The UUID of this pipeline coupling.
+* `app` - The name of the application.
+* `pipeline` - The UUID of the pipeline.
+* `stage` - The stage for this coupling.
diff --git a/website/source/docs/providers/nomad/index.html.markdown b/website/source/docs/providers/nomad/index.html.markdown
index acf696d2e..129a9540b 100644
--- a/website/source/docs/providers/nomad/index.html.markdown
+++ b/website/source/docs/providers/nomad/index.html.markdown
@@ -34,3 +34,6 @@ The following arguments are supported:
* `address` - (Optional) The HTTP(S) API address of the Nomad agent to use. Defaults to `http://127.0.0.1:4646`. The `NOMAD_ADDR` environment variable can also be used.
* `region` - (Optional) The Nomad region to target. The `NOMAD_REGION` environment variable can also be used.
+* `ca_file` - (Optional) A path to a PEM-encoded certificate authority used to verify the remote agent's certificate. The `NOMAD_CACERT` environment variable can also be used.
+* `cert_file` - (Optional) A path to a PEM-encoded certificate provided to the remote agent; requires use of `key_file`. The `NOMAD_CLIENT_CERT` environment variable can also be used.
+* `key_file` - (Optional) A path to a PEM-encoded private key, required if `cert_file` is specified. The `NOMAD_CLIENT_KEY` environment variable can also be used.
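+
+For example, a provider block using these TLS options might look like the following (address and paths illustrative):
+
+```hcl
+provider "nomad" {
+  address   = "https://nomad.example.com:4646"
+  ca_file   = "/path/to/ca.pem"
+  cert_file = "/path/to/cert.pem"
+  key_file  = "/path/to/key.pem"
+}
+```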
diff --git a/website/source/docs/providers/triton/index.html.markdown b/website/source/docs/providers/triton/index.html.markdown
index 7dc866d27..22b9a40e6 100644
--- a/website/source/docs/providers/triton/index.html.markdown
+++ b/website/source/docs/providers/triton/index.html.markdown
@@ -33,3 +33,4 @@ The following arguments are supported in the `provider` block:
* `key_material` - (Optional) This is the private key of an SSH key associated with the Triton account to be used. If this is not set, the private key corresponding to the fingerprint in `key_id` must be available via an SSH Agent.
* `key_id` - (Required) This is the fingerprint of the public key matching the key specified in `key_path`. It can be obtained via the command `ssh-keygen -l -E md5 -f /path/to/key`
* `url` - (Optional) This is the URL to the Triton API endpoint. It is required if using a private installation of Triton. The default is to use the Joyent public cloud us-west-1 endpoint. Valid public cloud endpoints include: `us-east-1`, `us-east-2`, `us-east-3`, `us-sw-1`, `us-west-1`, `eu-ams-1`
+* `insecure_skip_tls_verify` - (Optional) Defaults to `false`. This allows skipping TLS verification of the Triton endpoint. It is useful when connecting to a temporary Triton installation such as Cloud-On-A-Laptop, which does not generally use a certificate signed by a trusted root CA.
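+
+For example, a provider block for a temporary installation might look like the following (account, fingerprint and URL illustrative):
+
+```hcl
+provider "triton" {
+  account                  = "my-account"
+  key_id                   = "a1:b2:c3:d4:e5:f6:a7:b8:c9:d0:e1:f2:a3:b4:c5:d6"
+  url                      = "https://10.99.99.10"
+  insecure_skip_tls_verify = true
+}
+```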
diff --git a/website/source/docs/providers/triton/r/triton_machine.html.markdown b/website/source/docs/providers/triton/r/triton_machine.html.markdown
index f01fd8005..af34fade2 100644
--- a/website/source/docs/providers/triton/r/triton_machine.html.markdown
+++ b/website/source/docs/providers/triton/r/triton_machine.html.markdown
@@ -77,6 +77,9 @@ The following arguments are supported:
* `administrator_pw` - (string)
The initial password for the Administrator user. Only used for Windows virtual machines.
+* `cloud_config` - (string)
+ Cloud-init configuration for Linux brand machines, used instead of `user_data`.
+
The nested `nic` block supports the following:
* `network` - (string, Optional)
The network id to attach to the network interface. It will be hex, in the format: `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`.
diff --git a/website/source/index.html.erb b/website/source/index.html.erb
index ee33d640d..7412fb9d1 100644
--- a/website/source/index.html.erb
+++ b/website/source/index.html.erb
@@ -178,12 +178,12 @@ description: |-
-
Coming Soon!
+ <%= image_tag 'news/webinar-Terraform-4-4-2017.png', :alt=>'Webinar April 4, 2017, 10:00 AM PST' %>
+
Webinar: Multi-Cloud, One Command with Terraform
- We have some exciting new announcements planned for Terraform. Check
- back soon to learn more.
+ Watch our recent webinar with Mitchell Hashimoto to learn how Terraform provisions infrastructure across different clouds using a consistent workflow.
-
+ Watch Now
diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb
index 6cbb731b8..ec6633463 100644
--- a/website/source/layouts/aws.erb
+++ b/website/source/layouts/aws.erb
@@ -62,6 +62,9 @@
>
aws_ecs_task_definition
+
>
+ aws_efs_file_system
+
>
aws_elb_hosted_zone_id
@@ -69,7 +72,7 @@
aws_elb_service_account
>
- kinesis_stream
+ aws_kinesis_stream
>
aws_iam_account_alias
@@ -683,6 +686,10 @@
>
aws_emr_instance_group
+
+
>
+ aws_emr_security_configuration
+
@@ -1060,34 +1067,34 @@
WAF Resources
diff --git a/website/source/layouts/azurerm.erb b/website/source/layouts/azurerm.erb
index 037498a04..80b38a275 100644
--- a/website/source/layouts/azurerm.erb
+++ b/website/source/layouts/azurerm.erb
@@ -264,6 +264,10 @@
azurerm_sql_database
+
>
+ azurerm_sql_elasticpool
+
+
>
azurerm_sql_firewall_rule
diff --git a/website/source/layouts/digitalocean.erb b/website/source/layouts/digitalocean.erb
index 0d91752a4..c124499a2 100644
--- a/website/source/layouts/digitalocean.erb
+++ b/website/source/layouts/digitalocean.erb
@@ -10,6 +10,15 @@
DigitalOcean Provider
+
>
+ Data Sources
+
+
+
>
Resources
diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb
index 87d5b9dc7..c2b538e04 100644
--- a/website/source/layouts/docs.erb
+++ b/website/source/layouts/docs.erb
@@ -349,7 +349,7 @@
>
- OPC
+ Oracle Public Cloud
>
diff --git a/website/source/layouts/google.erb b/website/source/layouts/google.erb
index f6b59fd4e..dd26ee066 100644
--- a/website/source/layouts/google.erb
+++ b/website/source/layouts/google.erb
@@ -149,7 +149,11 @@
>
google_compute_router_peer
-
+
+
>
+ google_compute_snapshot
+
+
>
google_compute_ssl_certificate
diff --git a/website/source/layouts/heroku.erb b/website/source/layouts/heroku.erb
index 0957af9a0..0d9d83b05 100644
--- a/website/source/layouts/heroku.erb
+++ b/website/source/layouts/heroku.erb
@@ -17,10 +17,14 @@
heroku_addon
-
>
+ >
heroku_app
+
>
+ heroku_app_feature
+
+
>
heroku_cert
@@ -33,6 +37,14 @@
heroku_drain
+
>
+ heroku_pipeline
+
+
+
>
+ heroku_pipeline_coupling
+
+
>
heroku_space