Merge remote-tracking branch 'upstream' into feature/additional_zones
This commit is contained in:
commit
776bc47df3
|
@ -11,6 +11,7 @@ install:
|
||||||
- bash scripts/gogetcookie.sh
|
- bash scripts/gogetcookie.sh
|
||||||
script:
|
script:
|
||||||
- make test vet
|
- make test vet
|
||||||
|
- GOOS=windows go build
|
||||||
branches:
|
branches:
|
||||||
only:
|
only:
|
||||||
- master
|
- master
|
||||||
|
|
106
CHANGELOG.md
106
CHANGELOG.md
|
@ -1,39 +1,91 @@
|
||||||
## 0.8.3 (unreleased)
|
## 0.8.5 (Unreleased)
|
||||||
|
|
||||||
FEATURES:
|
FEATURES:
|
||||||
|
|
||||||
* **New Provider:** `Ignition` [GH-6189]
|
* **New Data Source:** `aws_elb_hosted_zone_id ` [GH-11027]
|
||||||
* **New Data Source:** `aws_vpc_peering_connection` [GH-10913]
|
|
||||||
* **New Resource:** `azurerm_container_registry` [GH-10973]
|
|
||||||
* **New Resource:** `azurerm_eventhub_authorization_rule` [GH-10971]
|
|
||||||
* **New Resource:** `azurerm_eventhub_consumer_group` [GH-9902]
|
|
||||||
|
|
||||||
IMPROVEMENTS:
|
IMPROVEMENTS:
|
||||||
|
|
||||||
* provider/archive: `archive_file` now exports `output_md5` attribute in addition to existing SHA1 and Base64 SHA256 hashes. [GH-10851]
|
* provider/aws: Add 'route_table_id' to route_table data source ([#11157](https://github.com/hashicorp/terraform/pull/11157))
|
||||||
* provider/aws: Add `most_recent` to the `ebs_snapshot` data source [GH-10986]
|
* provider/aws: Add Support for aws_cloudwatch_metric_alarm extended statistic [GH-11193]
|
||||||
* provider/aws: Add support for instance tenancy in `aws_opsworks_instance` [GH-10885]
|
* provider/azurerm: add caching support for virtual_machine data_disks [GH-11142]
|
||||||
* provider/aws: Added a validation for security group rule types [GH-10864]
|
* provider/azurerm: make lb sub resources idempotent [GH-11128]
|
||||||
* provider:aws: Add support for updating aws_emr_cluster parameters [GH-11008]
|
* provider/google: Add subnetwork_project field to enable cross-project networking in instance templates [GH-11110]
|
||||||
* provider/azurerm: Azure resource providers which are already registered are no longer re-registered. [GH-10991]
|
* provider/openstack: LoadBalancer Security Groups [GH-11074]
|
||||||
* provider/docker: Add network create --internal flag support [GH-10932]
|
* provider/statuscake: Add support for StatusCake confirmation servers [GH-11179]
|
||||||
* provider/docker: Add support for a list of pull_triggers within the docker_image resource. [GH-10845]
|
|
||||||
* provider/pagerduty Add delete support to `pagerduty_service_integration` [GH-10891]
|
|
||||||
* provider/postgresql Add permissions support to `postgresql_schema` as nested `policy` attributes [GH-10808]
|
|
||||||
|
|
||||||
BUG FIXES:
|
BUG FIXES:
|
||||||
|
|
||||||
* provider/aws: Guard against nil change output in `route53_zone` that causes panic [GH-10798]
|
* provider/aws: Fix panic when querying VPC's main route table via data source ([#11134](https://github.com/hashicorp/terraform/issues/11134))
|
||||||
* provider/aws: Reworked validateArn function to handle empty values [GH-10833]
|
* provider/aws: Allow creating aws_codecommit repository outside of us-east-1 [GH-11177]
|
||||||
* provider/aws: Set `aws_autoscaling_policy` `metric_aggregation_type` to be Computed [GH-10904]
|
|
||||||
* provider/aws: `storage_class` is now correctly treated as optional when configuring replication for `aws_s3_bucket` resources. [GH-10921]
|
## 0.8.4 (January 11, 2017)
|
||||||
* provider/aws: `user_data` on `aws_launch_configuration` resources is only base 64 encoded if the value provided is not already base 64 encoded. [GH-10871]
|
|
||||||
* provider/aws: Add snapshotting to the list of pending state for elasticache [GH-10965]
|
BACKWARDS INCOMPATIBILITIES / NOTES:
|
||||||
* provider/aws: Add support for updating tags in aws_emr_cluster [GH-11003]
|
|
||||||
* provider/aws: Fix the normalization of AWS policy statements [GH-11009]
|
* We have removed the `Arukas` provider that was added in v0.8.3 for this release. Unfortunately we found the
|
||||||
* provider/aws: data_source_aws_iam_server_certificate latest should be bool not string causes panic [GH-11016]
|
new provider included a dependency that would not compile and run on Windows operating systems. For now the
|
||||||
* provider/google: Fix backwards incompatibility around create_timeout in instances [GH-10858]
|
provider has been removed and we hope to work to reintroduce it for all platforms in the near future. Going forward we will also be taking additional steps in our build testing to ensure Terraform builds on all platforms before release.
|
||||||
* provider/openstack: Handle `PENDING_UPDATE` status with LBaaS v2 members [GH-10875]
|
|
||||||
|
## 0.8.3 (January 10, 2017)
|
||||||
|
|
||||||
|
FEATURES:
|
||||||
|
|
||||||
|
* **New Provider:** `Arukas` ([#10862](https://github.com/hashicorp/terraform/issues/10862))
|
||||||
|
* **New Provider:** `Ignition` ([#6189](https://github.com/hashicorp/terraform/issues/6189))
|
||||||
|
* **New Provider:** `OpsGenie` ([#11012](https://github.com/hashicorp/terraform/issues/11012))
|
||||||
|
* **New Data Source:** `aws_vpc_peering_connection` ([#10913](https://github.com/hashicorp/terraform/issues/10913))
|
||||||
|
* **New Resource:** `aws_codedeploy_deployment_config` ([#11062](https://github.com/hashicorp/terraform/issues/11062))
|
||||||
|
* **New Resource:** `azurerm_container_registry` ([#10973](https://github.com/hashicorp/terraform/issues/10973))
|
||||||
|
* **New Resource:** `azurerm_eventhub_authorization_rule` ([#10971](https://github.com/hashicorp/terraform/issues/10971))
|
||||||
|
* **New Resource:** `azurerm_eventhub_consumer_group` ([#9902](https://github.com/hashicorp/terraform/issues/9902))
|
||||||
|
|
||||||
|
IMPROVEMENTS:
|
||||||
|
|
||||||
|
* command/fmt: Show filename on parse error ([#10923](https://github.com/hashicorp/terraform/issues/10923))
|
||||||
|
* provider/archive: `archive_file` now exports `output_md5` attribute in addition to existing SHA1 and Base64 SHA256 hashes. ([#10851](https://github.com/hashicorp/terraform/issues/10851))
|
||||||
|
* provider/aws: Add `most_recent` to the `ebs_snapshot` data source ([#10986](https://github.com/hashicorp/terraform/issues/10986))
|
||||||
|
* provider/aws: Add support for instance tenancy in `aws_opsworks_instance` ([#10885](https://github.com/hashicorp/terraform/issues/10885))
|
||||||
|
* provider/aws: Added a validation for security group rule types ([#10864](https://github.com/hashicorp/terraform/issues/10864))
|
||||||
|
* provider:aws: Add support for updating aws_emr_cluster parameters ([#11008](https://github.com/hashicorp/terraform/issues/11008))
|
||||||
|
* provider/aws: Add Placement Constraints to `aws_ecs_task_definition` ([#11030](https://github.com/hashicorp/terraform/issues/11030))
|
||||||
|
* provider/aws: Increasing timeout for redshift cluster creation to 75 minutes ([#11041](https://github.com/hashicorp/terraform/issues/11041))
|
||||||
|
* provider/aws: Add support for content_handling to aws_api_gateway_integration_response ([#11002](https://github.com/hashicorp/terraform/issues/11002))
|
||||||
|
* provider/aws: Add S3 bucket name validation ([#11116](https://github.com/hashicorp/terraform/issues/11116))
|
||||||
|
* provider/aws: Add Route53 Record type validation ([#11119](https://github.com/hashicorp/terraform/issues/11119))
|
||||||
|
* provider/azurerm: support non public clouds ([#11026](https://github.com/hashicorp/terraform/issues/11026))
|
||||||
|
* provider/azurerm: Azure resource providers which are already registered are no longer re-registered. ([#10991](https://github.com/hashicorp/terraform/issues/10991))
|
||||||
|
* provider/docker: Add network create --internal flag support ([#10932](https://github.com/hashicorp/terraform/issues/10932))
|
||||||
|
* provider/docker: Add support for a list of pull_triggers within the docker_image resource. ([#10845](https://github.com/hashicorp/terraform/issues/10845))
|
||||||
|
* provider/pagerduty Add delete support to `pagerduty_service_integration` ([#10891](https://github.com/hashicorp/terraform/issues/10891))
|
||||||
|
* provider/postgresql Add permissions support to `postgresql_schema` as nested `policy` attributes ([#10808](https://github.com/hashicorp/terraform/issues/10808))
|
||||||
|
|
||||||
|
BUG FIXES:
|
||||||
|
|
||||||
|
* core: Properly expand sets as lists from a flatmap [[#11042](https://github.com/hashicorp/terraform/issues/11042)]
|
||||||
|
* core: Disallow root modules named "root" as a temporary workaround ([#11099](https://github.com/hashicorp/terraform/issues/11099))
|
||||||
|
* command/fmt: Lists of heredocs format properly ([#10947](https://github.com/hashicorp/terraform/issues/10947))
|
||||||
|
* command/graph: Fix crash when `-type=legacy` ([#11095](https://github.com/hashicorp/terraform/issues/11095))
|
||||||
|
* provider/aws: Guard against nil change output in `route53_zone` that causes panic ([#10798](https://github.com/hashicorp/terraform/issues/10798))
|
||||||
|
* provider/aws: Reworked validateArn function to handle empty values ([#10833](https://github.com/hashicorp/terraform/issues/10833))
|
||||||
|
* provider/aws: Set `aws_autoscaling_policy` `metric_aggregation_type` to be Computed ([#10904](https://github.com/hashicorp/terraform/issues/10904))
|
||||||
|
* provider/aws: `storage_class` is now correctly treated as optional when configuring replication for `aws_s3_bucket` resources. ([#10921](https://github.com/hashicorp/terraform/issues/10921))
|
||||||
|
* provider/aws: `user_data` on `aws_launch_configuration` resources is only base 64 encoded if the value provided is not already base 64 encoded. ([#10871](https://github.com/hashicorp/terraform/issues/10871))
|
||||||
|
* provider/aws: Add snapshotting to the list of pending state for elasticache ([#10965](https://github.com/hashicorp/terraform/issues/10965))
|
||||||
|
* provider/aws: Add support for updating tags in aws_emr_cluster ([#11003](https://github.com/hashicorp/terraform/issues/11003))
|
||||||
|
* provider/aws: Fix the normalization of AWS policy statements ([#11009](https://github.com/hashicorp/terraform/issues/11009))
|
||||||
|
* provider/aws: data_source_aws_iam_server_certificate latest should be bool not string causes panic ([#11016](https://github.com/hashicorp/terraform/issues/11016))
|
||||||
|
* provider/aws: Fix typo in aws_redshift_cluster causing security groups to not allow update ([#11025](https://github.com/hashicorp/terraform/issues/11025))
|
||||||
|
* provider/aws: Set `key_name` in `aws_key_pair` if omited in configuration ([#10987](https://github.com/hashicorp/terraform/issues/10987))
|
||||||
|
* provider/aws: Updating the aws_efs_mount_target dns_name ([#11023](https://github.com/hashicorp/terraform/issues/11023))
|
||||||
|
* provider/aws: Validate window time format for snapshot times and backup windows on RDS and ElastiCache resources ([#11089](https://github.com/hashicorp/terraform/issues/11089))
|
||||||
|
* provider/aws: aws_db_instance restored from snapshot had problem with subnet_group ([#11050](https://github.com/hashicorp/terraform/issues/11050))
|
||||||
|
* provider/aws: Allow disabled access_log in ELB ([#11120](https://github.com/hashicorp/terraform/issues/11120))
|
||||||
|
* provider/azurerm: fix update protocol for lb_probe ([#11125](https://github.com/hashicorp/terraform/issues/11125))
|
||||||
|
* provider/google: Fix backwards incompatibility around create_timeout in instances ([#10858](https://github.com/hashicorp/terraform/issues/10858))
|
||||||
|
* provider/google: google_compute_instance_group_manager update_strategy not properly read ([#10174](https://github.com/hashicorp/terraform/issues/10174))
|
||||||
|
* provider/openstack: Handle `PENDING_UPDATE` status with LBaaS v2 members ([#10875](https://github.com/hashicorp/terraform/issues/10875))
|
||||||
|
* provider/rancher: Add 'finishing-upgrade' state to rancher stack ([#11019](https://github.com/hashicorp/terraform/issues/11019))
|
||||||
|
|
||||||
|
|
||||||
## 0.8.2 (December 21, 2016)
|
## 0.8.2 (December 21, 2016)
|
||||||
|
|
|
@ -88,7 +88,7 @@ Assuming your work is on a branch called `my-feature-branch`, the steps look lik
|
||||||
go get github.com/hashicorp/my-project
|
go get github.com/hashicorp/my-project
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Add the new package to your vendor/ directory:
|
2. Add the new package to your `vendor/` directory:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
govendor add github.com/hashicorp/my-project/package
|
govendor add github.com/hashicorp/my-project/package
|
||||||
|
|
|
@ -0,0 +1,12 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/hashicorp/terraform/builtin/providers/opsgenie"
|
||||||
|
"github.com/hashicorp/terraform/plugin"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
plugin.Serve(&plugin.ServeOpts{
|
||||||
|
ProviderFunc: opsgenie.Provider,
|
||||||
|
})
|
||||||
|
}
|
|
@ -52,6 +52,7 @@ import (
|
||||||
"github.com/aws/aws-sdk-go/service/route53"
|
"github.com/aws/aws-sdk-go/service/route53"
|
||||||
"github.com/aws/aws-sdk-go/service/s3"
|
"github.com/aws/aws-sdk-go/service/s3"
|
||||||
"github.com/aws/aws-sdk-go/service/ses"
|
"github.com/aws/aws-sdk-go/service/ses"
|
||||||
|
"github.com/aws/aws-sdk-go/service/sfn"
|
||||||
"github.com/aws/aws-sdk-go/service/simpledb"
|
"github.com/aws/aws-sdk-go/service/simpledb"
|
||||||
"github.com/aws/aws-sdk-go/service/sns"
|
"github.com/aws/aws-sdk-go/service/sns"
|
||||||
"github.com/aws/aws-sdk-go/service/sqs"
|
"github.com/aws/aws-sdk-go/service/sqs"
|
||||||
|
@ -141,6 +142,7 @@ type AWSClient struct {
|
||||||
glacierconn *glacier.Glacier
|
glacierconn *glacier.Glacier
|
||||||
codedeployconn *codedeploy.CodeDeploy
|
codedeployconn *codedeploy.CodeDeploy
|
||||||
codecommitconn *codecommit.CodeCommit
|
codecommitconn *codecommit.CodeCommit
|
||||||
|
sfnconn *sfn.SFN
|
||||||
ssmconn *ssm.SSM
|
ssmconn *ssm.SSM
|
||||||
wafconn *waf.WAF
|
wafconn *waf.WAF
|
||||||
}
|
}
|
||||||
|
@ -264,7 +266,7 @@ func (c *Config) Client() (interface{}, error) {
|
||||||
client.cloudwatchconn = cloudwatch.New(sess)
|
client.cloudwatchconn = cloudwatch.New(sess)
|
||||||
client.cloudwatcheventsconn = cloudwatchevents.New(sess)
|
client.cloudwatcheventsconn = cloudwatchevents.New(sess)
|
||||||
client.cloudwatchlogsconn = cloudwatchlogs.New(sess)
|
client.cloudwatchlogsconn = cloudwatchlogs.New(sess)
|
||||||
client.codecommitconn = codecommit.New(usEast1Sess)
|
client.codecommitconn = codecommit.New(sess)
|
||||||
client.codedeployconn = codedeploy.New(sess)
|
client.codedeployconn = codedeploy.New(sess)
|
||||||
client.dsconn = directoryservice.New(sess)
|
client.dsconn = directoryservice.New(sess)
|
||||||
client.dynamodbconn = dynamodb.New(dynamoSess)
|
client.dynamodbconn = dynamodb.New(dynamoSess)
|
||||||
|
@ -292,6 +294,7 @@ func (c *Config) Client() (interface{}, error) {
|
||||||
client.simpledbconn = simpledb.New(sess)
|
client.simpledbconn = simpledb.New(sess)
|
||||||
client.s3conn = s3.New(awsS3Sess)
|
client.s3conn = s3.New(awsS3Sess)
|
||||||
client.sesConn = ses.New(sess)
|
client.sesConn = ses.New(sess)
|
||||||
|
client.sfnconn = sfn.New(sess)
|
||||||
client.snsconn = sns.New(sess)
|
client.snsconn = sns.New(sess)
|
||||||
client.sqsconn = sqs.New(sess)
|
client.sqsconn = sqs.New(sess)
|
||||||
client.ssmconn = ssm.New(sess)
|
client.ssmconn = ssm.New(sess)
|
||||||
|
|
|
@ -0,0 +1,54 @@
|
||||||
|
package aws
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
// See https://github.com/fog/fog-aws/pull/332/files
|
||||||
|
// This list isn't exposed by AWS - it's been found through
|
||||||
|
// trouble solving
|
||||||
|
var elbHostedZoneIdPerRegionMap = map[string]string{
|
||||||
|
"ap-northeast-1": "Z14GRHDCWA56QT",
|
||||||
|
"ap-northeast-2": "ZWKZPGTI48KDX",
|
||||||
|
"ap-south-1": "ZP97RAFLXTNZK",
|
||||||
|
"ap-southeast-1": "Z1LMS91P8CMLE5",
|
||||||
|
"ap-southeast-2": "Z1GM3OXH4ZPM65",
|
||||||
|
"ca-central-1": "ZQSVJUPU6J1EY",
|
||||||
|
"eu-central-1": "Z215JYRZR1TBD5",
|
||||||
|
"eu-west-1": "Z32O12XQLNTSW2",
|
||||||
|
"eu-west-2": "ZHURV8PSTC4K8",
|
||||||
|
"us-east-1": "Z35SXDOTRQ7X7K",
|
||||||
|
"us-east-2": "Z3AADJGX6KTTL2",
|
||||||
|
"us-west-1": "Z368ELLRRE2KJ0",
|
||||||
|
"us-west-2": "Z1H1FL5HABSF5",
|
||||||
|
"sa-east-1": "Z2P70J7HTTTPLU",
|
||||||
|
}
|
||||||
|
|
||||||
|
func dataSourceAwsElbHostedZoneId() *schema.Resource {
|
||||||
|
return &schema.Resource{
|
||||||
|
Read: dataSourceAwsElbHostedZoneIdRead,
|
||||||
|
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"region": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func dataSourceAwsElbHostedZoneIdRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
region := meta.(*AWSClient).region
|
||||||
|
if v, ok := d.GetOk("region"); ok {
|
||||||
|
region = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if zoneId, ok := elbHostedZoneIdPerRegionMap[region]; ok {
|
||||||
|
d.SetId(zoneId)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("Unknown region (%q)", region)
|
||||||
|
}
|
|
@ -0,0 +1,38 @@
|
||||||
|
package aws
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccAWSElbHostedZoneId_basic(t *testing.T) {
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccCheckAwsElbHostedZoneIdConfig,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
resource.TestCheckResourceAttr("data.aws_elb_hosted_zone_id.main", "id", "Z1H1FL5HABSF5"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Config: testAccCheckAwsElbHostedZoneIdExplicitRegionConfig,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
resource.TestCheckResourceAttr("data.aws_elb_hosted_zone_id.regional", "id", "Z32O12XQLNTSW2"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
const testAccCheckAwsElbHostedZoneIdConfig = `
|
||||||
|
data "aws_elb_hosted_zone_id" "main" { }
|
||||||
|
`
|
||||||
|
|
||||||
|
const testAccCheckAwsElbHostedZoneIdExplicitRegionConfig = `
|
||||||
|
data "aws_elb_hosted_zone_id" "regional" {
|
||||||
|
region = "eu-west-1"
|
||||||
|
}
|
||||||
|
`
|
|
@ -18,6 +18,11 @@ func dataSourceAwsRouteTable() *schema.Resource {
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
|
"route_table_id": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
"vpc_id": {
|
"vpc_id": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
|
@ -98,14 +103,16 @@ func dataSourceAwsRouteTableRead(d *schema.ResourceData, meta interface{}) error
|
||||||
req := &ec2.DescribeRouteTablesInput{}
|
req := &ec2.DescribeRouteTablesInput{}
|
||||||
vpcId, vpcIdOk := d.GetOk("vpc_id")
|
vpcId, vpcIdOk := d.GetOk("vpc_id")
|
||||||
subnetId, subnetIdOk := d.GetOk("subnet_id")
|
subnetId, subnetIdOk := d.GetOk("subnet_id")
|
||||||
|
rtbId, rtbOk := d.GetOk("route_table_id")
|
||||||
tags, tagsOk := d.GetOk("tags")
|
tags, tagsOk := d.GetOk("tags")
|
||||||
filter, filterOk := d.GetOk("filter")
|
filter, filterOk := d.GetOk("filter")
|
||||||
|
|
||||||
if !vpcIdOk && !subnetIdOk && !tagsOk && !filterOk {
|
if !vpcIdOk && !subnetIdOk && !tagsOk && !filterOk && !rtbOk {
|
||||||
return fmt.Errorf("One of vpc_id, subnet_id, filters, or tags must be assigned")
|
return fmt.Errorf("One of route_table_id, vpc_id, subnet_id, filters, or tags must be assigned")
|
||||||
}
|
}
|
||||||
req.Filters = buildEC2AttributeFilterList(
|
req.Filters = buildEC2AttributeFilterList(
|
||||||
map[string]string{
|
map[string]string{
|
||||||
|
"route-table-id": rtbId.(string),
|
||||||
"vpc-id": vpcId.(string),
|
"vpc-id": vpcId.(string),
|
||||||
"association.subnet-id": subnetId.(string),
|
"association.subnet-id": subnetId.(string),
|
||||||
},
|
},
|
||||||
|
@ -197,7 +204,10 @@ func dataSourceAssociationsRead(ec2Assocations []*ec2.RouteTableAssociation) []m
|
||||||
m := make(map[string]interface{})
|
m := make(map[string]interface{})
|
||||||
m["route_table_id"] = *a.RouteTableId
|
m["route_table_id"] = *a.RouteTableId
|
||||||
m["route_table_association_id"] = *a.RouteTableAssociationId
|
m["route_table_association_id"] = *a.RouteTableAssociationId
|
||||||
|
// GH[11134]
|
||||||
|
if a.SubnetId != nil {
|
||||||
m["subnet_id"] = *a.SubnetId
|
m["subnet_id"] = *a.SubnetId
|
||||||
|
}
|
||||||
m["main"] = *a.Main
|
m["main"] = *a.Main
|
||||||
associations = append(associations, m)
|
associations = append(associations, m)
|
||||||
}
|
}
|
||||||
|
|
|
@ -19,6 +19,22 @@ func TestAccDataSourceAwsRouteTable(t *testing.T) {
|
||||||
testAccDataSourceAwsRouteTableCheck("data.aws_route_table.by_tag"),
|
testAccDataSourceAwsRouteTableCheck("data.aws_route_table.by_tag"),
|
||||||
testAccDataSourceAwsRouteTableCheck("data.aws_route_table.by_filter"),
|
testAccDataSourceAwsRouteTableCheck("data.aws_route_table.by_filter"),
|
||||||
testAccDataSourceAwsRouteTableCheck("data.aws_route_table.by_subnet"),
|
testAccDataSourceAwsRouteTableCheck("data.aws_route_table.by_subnet"),
|
||||||
|
testAccDataSourceAwsRouteTableCheck("data.aws_route_table.by_id"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccDataSourceAwsRouteTable_main(t *testing.T) {
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccDataSourceAwsRouteTableMainRoute,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccDataSourceAwsRouteTableCheckMain("data.aws_route_table.by_filter"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -78,6 +94,32 @@ func testAccDataSourceAwsRouteTableCheck(name string) resource.TestCheckFunc {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccDataSourceAwsRouteTableCheckMain(name string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[name]
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("root module has no resource called %s", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
attr := rs.Primary.Attributes
|
||||||
|
|
||||||
|
// Verify attributes are set
|
||||||
|
if _, ok := attr["id"]; !ok {
|
||||||
|
return fmt.Errorf("id not set for main route table")
|
||||||
|
}
|
||||||
|
if _, ok := attr["vpc_id"]; !ok {
|
||||||
|
return fmt.Errorf("vpc_id not set for main route table")
|
||||||
|
}
|
||||||
|
// Verify it's actually the main route table that's returned
|
||||||
|
if attr["associations.0.main"] != "true" {
|
||||||
|
return fmt.Errorf("main route table not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
const testAccDataSourceAwsRouteTableGroupConfig = `
|
const testAccDataSourceAwsRouteTableGroupConfig = `
|
||||||
provider "aws" {
|
provider "aws" {
|
||||||
region = "eu-central-1"
|
region = "eu-central-1"
|
||||||
|
@ -124,9 +166,28 @@ data "aws_route_table" "by_tag" {
|
||||||
}
|
}
|
||||||
depends_on = ["aws_route_table_association.a"]
|
depends_on = ["aws_route_table_association.a"]
|
||||||
}
|
}
|
||||||
|
|
||||||
data "aws_route_table" "by_subnet" {
|
data "aws_route_table" "by_subnet" {
|
||||||
subnet_id = "${aws_subnet.test.id}"
|
subnet_id = "${aws_subnet.test.id}"
|
||||||
depends_on = ["aws_route_table_association.a"]
|
depends_on = ["aws_route_table_association.a"]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
data "aws_route_table" "by_id" {
|
||||||
|
route_table_id = "${aws_route_table.test.id}"
|
||||||
|
depends_on = ["aws_route_table_association.a"]
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
// Uses us-east-2, as region only has a single main route table
|
||||||
|
const testAccDataSourceAwsRouteTableMainRoute = `
|
||||||
|
provider "aws" {
|
||||||
|
region = "us-east-2"
|
||||||
|
}
|
||||||
|
|
||||||
|
data "aws_route_table" "by_filter" {
|
||||||
|
filter {
|
||||||
|
name = "association.main"
|
||||||
|
values = ["true"]
|
||||||
|
}
|
||||||
|
}
|
||||||
`
|
`
|
||||||
|
|
|
@ -0,0 +1,28 @@
|
||||||
|
package aws
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccAwsRoute53Record_importBasic(t *testing.T) {
|
||||||
|
resourceName := "aws_route53_record.default"
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckRoute53RecordDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccRoute53RecordConfig,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ResourceName: resourceName,
|
||||||
|
ImportState: true,
|
||||||
|
ImportStateVerify: true,
|
||||||
|
ImportStateVerifyIgnore: []string{"weight"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
|
@ -156,6 +156,7 @@ func Provider() terraform.ResourceProvider {
|
||||||
"aws_ebs_volume": dataSourceAwsEbsVolume(),
|
"aws_ebs_volume": dataSourceAwsEbsVolume(),
|
||||||
"aws_ecs_container_definition": dataSourceAwsEcsContainerDefinition(),
|
"aws_ecs_container_definition": dataSourceAwsEcsContainerDefinition(),
|
||||||
"aws_eip": dataSourceAwsEip(),
|
"aws_eip": dataSourceAwsEip(),
|
||||||
|
"aws_elb_hosted_zone_id": dataSourceAwsElbHostedZoneId(),
|
||||||
"aws_elb_service_account": dataSourceAwsElbServiceAccount(),
|
"aws_elb_service_account": dataSourceAwsElbServiceAccount(),
|
||||||
"aws_iam_account_alias": dataSourceAwsIamAccountAlias(),
|
"aws_iam_account_alias": dataSourceAwsIamAccountAlias(),
|
||||||
"aws_iam_policy_document": dataSourceAwsIamPolicyDocument(),
|
"aws_iam_policy_document": dataSourceAwsIamPolicyDocument(),
|
||||||
|
@ -219,6 +220,7 @@ func Provider() terraform.ResourceProvider {
|
||||||
"aws_autoscaling_lifecycle_hook": resourceAwsAutoscalingLifecycleHook(),
|
"aws_autoscaling_lifecycle_hook": resourceAwsAutoscalingLifecycleHook(),
|
||||||
"aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(),
|
"aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(),
|
||||||
"aws_codedeploy_app": resourceAwsCodeDeployApp(),
|
"aws_codedeploy_app": resourceAwsCodeDeployApp(),
|
||||||
|
"aws_codedeploy_deployment_config": resourceAwsCodeDeployDeploymentConfig(),
|
||||||
"aws_codedeploy_deployment_group": resourceAwsCodeDeployDeploymentGroup(),
|
"aws_codedeploy_deployment_group": resourceAwsCodeDeployDeploymentGroup(),
|
||||||
"aws_codecommit_repository": resourceAwsCodeCommitRepository(),
|
"aws_codecommit_repository": resourceAwsCodeCommitRepository(),
|
||||||
"aws_codecommit_trigger": resourceAwsCodeCommitTrigger(),
|
"aws_codecommit_trigger": resourceAwsCodeCommitTrigger(),
|
||||||
|
|
|
@ -295,6 +295,7 @@ resource "aws_lambda_function" "authorizer" {
|
||||||
function_name = "tf_acc_api_gateway_authorizer"
|
function_name = "tf_acc_api_gateway_authorizer"
|
||||||
role = "${aws_iam_role.iam_for_lambda.arn}"
|
role = "${aws_iam_role.iam_for_lambda.arn}"
|
||||||
handler = "exports.example"
|
handler = "exports.example"
|
||||||
|
runtime = "nodejs4.3"
|
||||||
}
|
}
|
||||||
`
|
`
|
||||||
|
|
||||||
|
|
|
@ -17,7 +17,7 @@ func resourceAwsApiGatewayIntegration() *schema.Resource {
|
||||||
return &schema.Resource{
|
return &schema.Resource{
|
||||||
Create: resourceAwsApiGatewayIntegrationCreate,
|
Create: resourceAwsApiGatewayIntegrationCreate,
|
||||||
Read: resourceAwsApiGatewayIntegrationRead,
|
Read: resourceAwsApiGatewayIntegrationRead,
|
||||||
Update: resourceAwsApiGatewayIntegrationUpdate,
|
Update: resourceAwsApiGatewayIntegrationCreate,
|
||||||
Delete: resourceAwsApiGatewayIntegrationDelete,
|
Delete: resourceAwsApiGatewayIntegrationDelete,
|
||||||
|
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
|
@ -202,10 +202,6 @@ func resourceAwsApiGatewayIntegrationRead(d *schema.ResourceData, meta interface
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceAwsApiGatewayIntegrationUpdate(d *schema.ResourceData, meta interface{}) error {
|
|
||||||
return resourceAwsApiGatewayIntegrationCreate(d, meta)
|
|
||||||
}
|
|
||||||
|
|
||||||
func resourceAwsApiGatewayIntegrationDelete(d *schema.ResourceData, meta interface{}) error {
|
func resourceAwsApiGatewayIntegrationDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
conn := meta.(*AWSClient).apigateway
|
conn := meta.(*AWSClient).apigateway
|
||||||
log.Printf("[DEBUG] Deleting API Gateway Integration: %s", d.Id())
|
log.Printf("[DEBUG] Deleting API Gateway Integration: %s", d.Id())
|
||||||
|
|
|
@ -17,58 +17,64 @@ func resourceAwsApiGatewayIntegrationResponse() *schema.Resource {
|
||||||
return &schema.Resource{
|
return &schema.Resource{
|
||||||
Create: resourceAwsApiGatewayIntegrationResponseCreate,
|
Create: resourceAwsApiGatewayIntegrationResponseCreate,
|
||||||
Read: resourceAwsApiGatewayIntegrationResponseRead,
|
Read: resourceAwsApiGatewayIntegrationResponseRead,
|
||||||
Update: resourceAwsApiGatewayIntegrationResponseUpdate,
|
Update: resourceAwsApiGatewayIntegrationResponseCreate,
|
||||||
Delete: resourceAwsApiGatewayIntegrationResponseDelete,
|
Delete: resourceAwsApiGatewayIntegrationResponseDelete,
|
||||||
|
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"rest_api_id": &schema.Schema{
|
"rest_api_id": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
"resource_id": &schema.Schema{
|
"resource_id": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
"http_method": &schema.Schema{
|
"http_method": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
ValidateFunc: validateHTTPMethod,
|
ValidateFunc: validateHTTPMethod,
|
||||||
},
|
},
|
||||||
|
|
||||||
"status_code": &schema.Schema{
|
"status_code": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
"selection_pattern": &schema.Schema{
|
"selection_pattern": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
"response_templates": &schema.Schema{
|
"response_templates": {
|
||||||
Type: schema.TypeMap,
|
Type: schema.TypeMap,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Elem: schema.TypeString,
|
Elem: schema.TypeString,
|
||||||
},
|
},
|
||||||
|
|
||||||
"response_parameters": &schema.Schema{
|
"response_parameters": {
|
||||||
Type: schema.TypeMap,
|
Type: schema.TypeMap,
|
||||||
Elem: schema.TypeString,
|
Elem: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ConflictsWith: []string{"response_parameters_in_json"},
|
ConflictsWith: []string{"response_parameters_in_json"},
|
||||||
},
|
},
|
||||||
|
|
||||||
"response_parameters_in_json": &schema.Schema{
|
"response_parameters_in_json": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ConflictsWith: []string{"response_parameters"},
|
ConflictsWith: []string{"response_parameters"},
|
||||||
Deprecated: "Use field response_parameters instead",
|
Deprecated: "Use field response_parameters instead",
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"content_handling": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ValidateFunc: validateApiGatewayIntegrationContentHandling,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -92,6 +98,10 @@ func resourceAwsApiGatewayIntegrationResponseCreate(d *schema.ResourceData, meta
|
||||||
return fmt.Errorf("Error unmarshaling response_parameters_in_json: %s", err)
|
return fmt.Errorf("Error unmarshaling response_parameters_in_json: %s", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
var contentHandling *string
|
||||||
|
if val, ok := d.GetOk("content_handling"); ok {
|
||||||
|
contentHandling = aws.String(val.(string))
|
||||||
|
}
|
||||||
|
|
||||||
input := apigateway.PutIntegrationResponseInput{
|
input := apigateway.PutIntegrationResponseInput{
|
||||||
HttpMethod: aws.String(d.Get("http_method").(string)),
|
HttpMethod: aws.String(d.Get("http_method").(string)),
|
||||||
|
@ -100,10 +110,12 @@ func resourceAwsApiGatewayIntegrationResponseCreate(d *schema.ResourceData, meta
|
||||||
StatusCode: aws.String(d.Get("status_code").(string)),
|
StatusCode: aws.String(d.Get("status_code").(string)),
|
||||||
ResponseTemplates: aws.StringMap(templates),
|
ResponseTemplates: aws.StringMap(templates),
|
||||||
ResponseParameters: aws.StringMap(parameters),
|
ResponseParameters: aws.StringMap(parameters),
|
||||||
|
ContentHandling: contentHandling,
|
||||||
}
|
}
|
||||||
if v, ok := d.GetOk("selection_pattern"); ok {
|
if v, ok := d.GetOk("selection_pattern"); ok {
|
||||||
input.SelectionPattern = aws.String(v.(string))
|
input.SelectionPattern = aws.String(v.(string))
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := conn.PutIntegrationResponse(&input)
|
_, err := conn.PutIntegrationResponse(&input)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Error creating API Gateway Integration Response: %s", err)
|
return fmt.Errorf("Error creating API Gateway Integration Response: %s", err)
|
||||||
|
@ -143,10 +155,6 @@ func resourceAwsApiGatewayIntegrationResponseRead(d *schema.ResourceData, meta i
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceAwsApiGatewayIntegrationResponseUpdate(d *schema.ResourceData, meta interface{}) error {
|
|
||||||
return resourceAwsApiGatewayIntegrationResponseCreate(d, meta)
|
|
||||||
}
|
|
||||||
|
|
||||||
func resourceAwsApiGatewayIntegrationResponseDelete(d *schema.ResourceData, meta interface{}) error {
|
func resourceAwsApiGatewayIntegrationResponseDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
conn := meta.(*AWSClient).apigateway
|
conn := meta.(*AWSClient).apigateway
|
||||||
log.Printf("[DEBUG] Deleting API Gateway Integration Response: %s", d.Id())
|
log.Printf("[DEBUG] Deleting API Gateway Integration Response: %s", d.Id())
|
||||||
|
|
|
@ -28,6 +28,8 @@ func TestAccAWSAPIGatewayIntegrationResponse_basic(t *testing.T) {
|
||||||
"aws_api_gateway_integration_response.test", "response_templates.application/json", ""),
|
"aws_api_gateway_integration_response.test", "response_templates.application/json", ""),
|
||||||
resource.TestCheckResourceAttr(
|
resource.TestCheckResourceAttr(
|
||||||
"aws_api_gateway_integration_response.test", "response_templates.application/xml", "#set($inputRoot = $input.path('$'))\n{ }"),
|
"aws_api_gateway_integration_response.test", "response_templates.application/xml", "#set($inputRoot = $input.path('$'))\n{ }"),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"aws_api_gateway_integration_response.test", "content_handling", ""),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
|
|
||||||
|
@ -40,6 +42,8 @@ func TestAccAWSAPIGatewayIntegrationResponse_basic(t *testing.T) {
|
||||||
"aws_api_gateway_integration_response.test", "response_templates.application/json", "$input.path('$')"),
|
"aws_api_gateway_integration_response.test", "response_templates.application/json", "$input.path('$')"),
|
||||||
resource.TestCheckResourceAttr(
|
resource.TestCheckResourceAttr(
|
||||||
"aws_api_gateway_integration_response.test", "response_templates.application/xml", ""),
|
"aws_api_gateway_integration_response.test", "response_templates.application/xml", ""),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"aws_api_gateway_integration_response.test", "content_handling", "CONVERT_TO_BINARY"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -282,5 +286,7 @@ resource "aws_api_gateway_integration_response" "test" {
|
||||||
"application/xml" = ""
|
"application/xml" = ""
|
||||||
}
|
}
|
||||||
|
|
||||||
|
content_handling = "CONVERT_TO_BINARY"
|
||||||
|
|
||||||
}
|
}
|
||||||
`
|
`
|
||||||
|
|
|
@ -261,6 +261,7 @@ resource "aws_lambda_function" "authorizer" {
|
||||||
function_name = "tf_acc_api_gateway_authorizer"
|
function_name = "tf_acc_api_gateway_authorizer"
|
||||||
role = "${aws_iam_role.iam_for_lambda.arn}"
|
role = "${aws_iam_role.iam_for_lambda.arn}"
|
||||||
handler = "exports.example"
|
handler = "exports.example"
|
||||||
|
runtime = "nodejs4.3"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_api_gateway_authorizer" "test" {
|
resource "aws_api_gateway_authorizer" "test" {
|
||||||
|
|
|
@ -8,6 +8,7 @@ import (
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
|
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
|
||||||
|
"github.com/hashicorp/errwrap"
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceAwsCloudWatchLogGroup() *schema.Resource {
|
func resourceAwsCloudWatchLogGroup() *schema.Resource {
|
||||||
|
@ -21,23 +22,25 @@ func resourceAwsCloudWatchLogGroup() *schema.Resource {
|
||||||
},
|
},
|
||||||
|
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"name": &schema.Schema{
|
"name": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
ValidateFunc: validateLogGroupName,
|
ValidateFunc: validateLogGroupName,
|
||||||
},
|
},
|
||||||
|
|
||||||
"retention_in_days": &schema.Schema{
|
"retention_in_days": {
|
||||||
Type: schema.TypeInt,
|
Type: schema.TypeInt,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Default: 0,
|
Default: 0,
|
||||||
},
|
},
|
||||||
|
|
||||||
"arn": &schema.Schema{
|
"arn": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"tags": tagsSchema(),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -46,6 +49,7 @@ func resourceAwsCloudWatchLogGroupCreate(d *schema.ResourceData, meta interface{
|
||||||
conn := meta.(*AWSClient).cloudwatchlogsconn
|
conn := meta.(*AWSClient).cloudwatchlogsconn
|
||||||
|
|
||||||
log.Printf("[DEBUG] Creating CloudWatch Log Group: %s", d.Get("name").(string))
|
log.Printf("[DEBUG] Creating CloudWatch Log Group: %s", d.Get("name").(string))
|
||||||
|
|
||||||
_, err := conn.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{
|
_, err := conn.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{
|
||||||
LogGroupName: aws.String(d.Get("name").(string)),
|
LogGroupName: aws.String(d.Get("name").(string)),
|
||||||
})
|
})
|
||||||
|
@ -83,6 +87,12 @@ func resourceAwsCloudWatchLogGroupRead(d *schema.ResourceData, meta interface{})
|
||||||
d.Set("retention_in_days", lg.RetentionInDays)
|
d.Set("retention_in_days", lg.RetentionInDays)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
tags, err := flattenCloudWatchTags(d, conn)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
d.Set("tags", tags)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -138,9 +148,55 @@ func resourceAwsCloudWatchLogGroupUpdate(d *schema.ResourceData, meta interface{
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if d.HasChange("tags") {
|
||||||
|
oraw, nraw := d.GetChange("tags")
|
||||||
|
o := oraw.(map[string]interface{})
|
||||||
|
n := nraw.(map[string]interface{})
|
||||||
|
create, remove := diffCloudWatchTags(o, n)
|
||||||
|
|
||||||
|
if len(remove) > 0 {
|
||||||
|
log.Printf("[DEBUG] Removing tags from %s", name)
|
||||||
|
_, err := conn.UntagLogGroup(&cloudwatchlogs.UntagLogGroupInput{
|
||||||
|
LogGroupName: aws.String(name),
|
||||||
|
Tags: remove,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(create) > 0 {
|
||||||
|
log.Printf("[DEBUG] Creating tags on %s", name)
|
||||||
|
_, err := conn.TagLogGroup(&cloudwatchlogs.TagLogGroupInput{
|
||||||
|
LogGroupName: aws.String(name),
|
||||||
|
Tags: create,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return resourceAwsCloudWatchLogGroupRead(d, meta)
|
return resourceAwsCloudWatchLogGroupRead(d, meta)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func diffCloudWatchTags(oldTags map[string]interface{}, newTags map[string]interface{}) (map[string]*string, []*string) {
|
||||||
|
create := make(map[string]*string)
|
||||||
|
for k, v := range newTags {
|
||||||
|
create[k] = aws.String(v.(string))
|
||||||
|
}
|
||||||
|
|
||||||
|
var remove []*string
|
||||||
|
for _, t := range oldTags {
|
||||||
|
old, ok := create[t.(string)]
|
||||||
|
if !ok || *old != t.(string) {
|
||||||
|
remove = append(remove, aws.String(t.(string)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return create, remove
|
||||||
|
}
|
||||||
|
|
||||||
func resourceAwsCloudWatchLogGroupDelete(d *schema.ResourceData, meta interface{}) error {
|
func resourceAwsCloudWatchLogGroupDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
conn := meta.(*AWSClient).cloudwatchlogsconn
|
conn := meta.(*AWSClient).cloudwatchlogsconn
|
||||||
log.Printf("[INFO] Deleting CloudWatch Log Group: %s", d.Id())
|
log.Printf("[INFO] Deleting CloudWatch Log Group: %s", d.Id())
|
||||||
|
@ -156,3 +212,23 @@ func resourceAwsCloudWatchLogGroupDelete(d *schema.ResourceData, meta interface{
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func flattenCloudWatchTags(d *schema.ResourceData, conn *cloudwatchlogs.CloudWatchLogs) (map[string]interface{}, error) {
|
||||||
|
tagsOutput, err := conn.ListTagsLogGroup(&cloudwatchlogs.ListTagsLogGroupInput{
|
||||||
|
LogGroupName: aws.String(d.Get("name").(string)),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, errwrap.Wrapf("Error Getting CloudWatch Logs Tag List: %s", err)
|
||||||
|
}
|
||||||
|
if tagsOutput != nil {
|
||||||
|
output := make(map[string]interface{}, len(tagsOutput.Tags))
|
||||||
|
|
||||||
|
for i, v := range tagsOutput.Tags {
|
||||||
|
output[i] = *v
|
||||||
|
}
|
||||||
|
|
||||||
|
return output, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return make(map[string]interface{}), nil
|
||||||
|
}
|
||||||
|
|
|
@ -19,7 +19,7 @@ func TestAccAWSCloudWatchLogGroup_basic(t *testing.T) {
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy,
|
CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
{
|
||||||
Config: testAccAWSCloudWatchLogGroupConfig(rInt),
|
Config: testAccAWSCloudWatchLogGroupConfig(rInt),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg),
|
testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg),
|
||||||
|
@ -39,14 +39,14 @@ func TestAccAWSCloudWatchLogGroup_retentionPolicy(t *testing.T) {
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy,
|
CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
{
|
||||||
Config: testAccAWSCloudWatchLogGroupConfig_withRetention(rInt),
|
Config: testAccAWSCloudWatchLogGroupConfig_withRetention(rInt),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg),
|
testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg),
|
||||||
resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "retention_in_days", "365"),
|
resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "retention_in_days", "365"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
resource.TestStep{
|
{
|
||||||
Config: testAccAWSCloudWatchLogGroupConfigModified_withRetention(rInt),
|
Config: testAccAWSCloudWatchLogGroupConfigModified_withRetention(rInt),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg),
|
testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg),
|
||||||
|
@ -66,7 +66,7 @@ func TestAccAWSCloudWatchLogGroup_multiple(t *testing.T) {
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy,
|
CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
{
|
||||||
Config: testAccAWSCloudWatchLogGroupConfig_multiple(rInt),
|
Config: testAccAWSCloudWatchLogGroupConfig_multiple(rInt),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.alpha", &lg),
|
testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.alpha", &lg),
|
||||||
|
@ -90,7 +90,7 @@ func TestAccAWSCloudWatchLogGroup_disappears(t *testing.T) {
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy,
|
CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
{
|
||||||
Config: testAccAWSCloudWatchLogGroupConfig(rInt),
|
Config: testAccAWSCloudWatchLogGroupConfig(rInt),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg),
|
testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg),
|
||||||
|
@ -102,6 +102,37 @@ func TestAccAWSCloudWatchLogGroup_disappears(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccAWSCloudWatchLogGroup_tagging(t *testing.T) {
|
||||||
|
var lg cloudwatchlogs.LogGroup
|
||||||
|
rInt := acctest.RandInt()
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccAWSCloudWatchLogGroupConfigWithTags(rInt),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg),
|
||||||
|
resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.%", "2"),
|
||||||
|
resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.Environment", "Production"),
|
||||||
|
resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.Foo", "Bar"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Config: testAccAWSCloudWatchLogGroupConfigWithTagsUpdated(rInt),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg),
|
||||||
|
resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.%", "3"),
|
||||||
|
resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.Environment", "Development"),
|
||||||
|
resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.Bar", "baz"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckCloudWatchLogGroupDisappears(lg *cloudwatchlogs.LogGroup) resource.TestCheckFunc {
|
func testAccCheckCloudWatchLogGroupDisappears(lg *cloudwatchlogs.LogGroup) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
conn := testAccProvider.Meta().(*AWSClient).cloudwatchlogsconn
|
conn := testAccProvider.Meta().(*AWSClient).cloudwatchlogsconn
|
||||||
|
@ -166,6 +197,33 @@ resource "aws_cloudwatch_log_group" "foobar" {
|
||||||
`, rInt)
|
`, rInt)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccAWSCloudWatchLogGroupConfigWithTags(rInt int) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "aws_cloudwatch_log_group" "foobar" {
|
||||||
|
name = "foo-bar-%d"
|
||||||
|
|
||||||
|
tags {
|
||||||
|
Environment = "Production"
|
||||||
|
Foo = "Bar"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`, rInt)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccAWSCloudWatchLogGroupConfigWithTagsUpdated(rInt int) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "aws_cloudwatch_log_group" "foobar" {
|
||||||
|
name = "foo-bar-%d"
|
||||||
|
|
||||||
|
tags {
|
||||||
|
Environment = "Development"
|
||||||
|
Foo = "Bar"
|
||||||
|
Bar = "baz"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`, rInt)
|
||||||
|
}
|
||||||
|
|
||||||
func testAccAWSCloudWatchLogGroupConfig_withRetention(rInt int) string {
|
func testAccAWSCloudWatchLogGroupConfig_withRetention(rInt int) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "aws_cloudwatch_log_group" "foobar" {
|
resource "aws_cloudwatch_log_group" "foobar" {
|
||||||
|
|
|
@ -21,74 +21,80 @@ func resourceAwsCloudWatchMetricAlarm() *schema.Resource {
|
||||||
},
|
},
|
||||||
|
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"alarm_name": &schema.Schema{
|
"alarm_name": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
"comparison_operator": &schema.Schema{
|
"comparison_operator": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
},
|
},
|
||||||
"evaluation_periods": &schema.Schema{
|
"evaluation_periods": {
|
||||||
Type: schema.TypeInt,
|
Type: schema.TypeInt,
|
||||||
Required: true,
|
Required: true,
|
||||||
},
|
},
|
||||||
"metric_name": &schema.Schema{
|
"metric_name": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
},
|
},
|
||||||
"namespace": &schema.Schema{
|
"namespace": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
},
|
},
|
||||||
"period": &schema.Schema{
|
"period": {
|
||||||
Type: schema.TypeInt,
|
Type: schema.TypeInt,
|
||||||
Required: true,
|
Required: true,
|
||||||
},
|
},
|
||||||
"statistic": &schema.Schema{
|
"statistic": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Optional: true,
|
||||||
|
ConflictsWith: []string{"extended_statistic"},
|
||||||
},
|
},
|
||||||
"threshold": &schema.Schema{
|
"threshold": {
|
||||||
Type: schema.TypeFloat,
|
Type: schema.TypeFloat,
|
||||||
Required: true,
|
Required: true,
|
||||||
},
|
},
|
||||||
"actions_enabled": &schema.Schema{
|
"actions_enabled": {
|
||||||
Type: schema.TypeBool,
|
Type: schema.TypeBool,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Default: true,
|
Default: true,
|
||||||
},
|
},
|
||||||
"alarm_actions": &schema.Schema{
|
"alarm_actions": {
|
||||||
Type: schema.TypeSet,
|
Type: schema.TypeSet,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Elem: &schema.Schema{Type: schema.TypeString},
|
Elem: &schema.Schema{Type: schema.TypeString},
|
||||||
Set: schema.HashString,
|
Set: schema.HashString,
|
||||||
},
|
},
|
||||||
"alarm_description": &schema.Schema{
|
"alarm_description": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
},
|
},
|
||||||
"dimensions": &schema.Schema{
|
"dimensions": {
|
||||||
Type: schema.TypeMap,
|
Type: schema.TypeMap,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
},
|
},
|
||||||
"insufficient_data_actions": &schema.Schema{
|
"insufficient_data_actions": {
|
||||||
Type: schema.TypeSet,
|
Type: schema.TypeSet,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Elem: &schema.Schema{Type: schema.TypeString},
|
Elem: &schema.Schema{Type: schema.TypeString},
|
||||||
Set: schema.HashString,
|
Set: schema.HashString,
|
||||||
},
|
},
|
||||||
"ok_actions": &schema.Schema{
|
"ok_actions": {
|
||||||
Type: schema.TypeSet,
|
Type: schema.TypeSet,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Elem: &schema.Schema{Type: schema.TypeString},
|
Elem: &schema.Schema{Type: schema.TypeString},
|
||||||
Set: schema.HashString,
|
Set: schema.HashString,
|
||||||
},
|
},
|
||||||
"unit": &schema.Schema{
|
"unit": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
},
|
},
|
||||||
|
"extended_statistic": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ConflictsWith: []string{"statistic"},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -96,6 +102,13 @@ func resourceAwsCloudWatchMetricAlarm() *schema.Resource {
|
||||||
func resourceAwsCloudWatchMetricAlarmCreate(d *schema.ResourceData, meta interface{}) error {
|
func resourceAwsCloudWatchMetricAlarmCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
conn := meta.(*AWSClient).cloudwatchconn
|
conn := meta.(*AWSClient).cloudwatchconn
|
||||||
|
|
||||||
|
_, statisticOk := d.GetOk("statistic")
|
||||||
|
_, extendedStatisticOk := d.GetOk("extended_statistic")
|
||||||
|
|
||||||
|
if !statisticOk && !extendedStatisticOk {
|
||||||
|
return fmt.Errorf("One of `statistic` or `extended_statistic` must be set for a cloudwatch metric alarm")
|
||||||
|
}
|
||||||
|
|
||||||
params := getAwsCloudWatchPutMetricAlarmInput(d)
|
params := getAwsCloudWatchPutMetricAlarmInput(d)
|
||||||
|
|
||||||
log.Printf("[DEBUG] Creating CloudWatch Metric Alarm: %#v", params)
|
log.Printf("[DEBUG] Creating CloudWatch Metric Alarm: %#v", params)
|
||||||
|
@ -147,6 +160,7 @@ func resourceAwsCloudWatchMetricAlarmRead(d *schema.ResourceData, meta interface
|
||||||
d.Set("statistic", a.Statistic)
|
d.Set("statistic", a.Statistic)
|
||||||
d.Set("threshold", a.Threshold)
|
d.Set("threshold", a.Threshold)
|
||||||
d.Set("unit", a.Unit)
|
d.Set("unit", a.Unit)
|
||||||
|
d.Set("extended_statistic", a.ExtendedStatistic)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -199,7 +213,6 @@ func getAwsCloudWatchPutMetricAlarmInput(d *schema.ResourceData) cloudwatch.PutM
|
||||||
MetricName: aws.String(d.Get("metric_name").(string)),
|
MetricName: aws.String(d.Get("metric_name").(string)),
|
||||||
Namespace: aws.String(d.Get("namespace").(string)),
|
Namespace: aws.String(d.Get("namespace").(string)),
|
||||||
Period: aws.Int64(int64(d.Get("period").(int))),
|
Period: aws.Int64(int64(d.Get("period").(int))),
|
||||||
Statistic: aws.String(d.Get("statistic").(string)),
|
|
||||||
Threshold: aws.Float64(d.Get("threshold").(float64)),
|
Threshold: aws.Float64(d.Get("threshold").(float64)),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -215,6 +228,14 @@ func getAwsCloudWatchPutMetricAlarmInput(d *schema.ResourceData) cloudwatch.PutM
|
||||||
params.Unit = aws.String(v.(string))
|
params.Unit = aws.String(v.(string))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("statistic"); ok {
|
||||||
|
params.Statistic = aws.String(v.(string))
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("extended_statistic"); ok {
|
||||||
|
params.ExtendedStatistic = aws.String(v.(string))
|
||||||
|
}
|
||||||
|
|
||||||
var alarmActions []*string
|
var alarmActions []*string
|
||||||
if v := d.Get("alarm_actions"); v != nil {
|
if v := d.Get("alarm_actions"); v != nil {
|
||||||
for _, v := range v.(*schema.Set).List() {
|
for _, v := range v.(*schema.Set).List() {
|
||||||
|
|
|
@ -2,6 +2,7 @@ package aws
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"regexp"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
@ -18,7 +19,7 @@ func TestAccAWSCloudWatchMetricAlarm_basic(t *testing.T) {
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testAccCheckAWSCloudWatchMetricAlarmDestroy,
|
CheckDestroy: testAccCheckAWSCloudWatchMetricAlarmDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
{
|
||||||
Config: testAccAWSCloudWatchMetricAlarmConfig,
|
Config: testAccAWSCloudWatchMetricAlarmConfig,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckCloudWatchMetricAlarmExists("aws_cloudwatch_metric_alarm.foobar", &alarm),
|
testAccCheckCloudWatchMetricAlarmExists("aws_cloudwatch_metric_alarm.foobar", &alarm),
|
||||||
|
@ -32,6 +33,39 @@ func TestAccAWSCloudWatchMetricAlarm_basic(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccAWSCloudWatchMetricAlarm_extendedStatistic(t *testing.T) {
|
||||||
|
var alarm cloudwatch.MetricAlarm
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckAWSCloudWatchMetricAlarmDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccAWSCloudWatchMetricAlarmConfigExtendedStatistic,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckCloudWatchMetricAlarmExists("aws_cloudwatch_metric_alarm.foobar", &alarm),
|
||||||
|
resource.TestCheckResourceAttr("aws_cloudwatch_metric_alarm.foobar", "extended_statistic", "p88.0"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccAWSCloudWatchMetricAlarm_missingStatistic(t *testing.T) {
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckAWSCloudWatchMetricAlarmDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccAWSCloudWatchMetricAlarmConfigMissingStatistic,
|
||||||
|
ExpectError: regexp.MustCompile("One of `statistic` or `extended_statistic` must be set for a cloudwatch metric alarm"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckCloudWatchMetricAlarmDimension(n, k, v string) resource.TestCheckFunc {
|
func testAccCheckCloudWatchMetricAlarmDimension(n, k, v string) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
@ -116,3 +150,38 @@ resource "aws_cloudwatch_metric_alarm" "foobar" {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
`)
|
`)
|
||||||
|
|
||||||
|
var testAccAWSCloudWatchMetricAlarmConfigExtendedStatistic = fmt.Sprintf(`
|
||||||
|
resource "aws_cloudwatch_metric_alarm" "foobar" {
|
||||||
|
alarm_name = "terraform-test-foobar6"
|
||||||
|
comparison_operator = "GreaterThanOrEqualToThreshold"
|
||||||
|
evaluation_periods = "2"
|
||||||
|
metric_name = "CPUUtilization"
|
||||||
|
namespace = "AWS/EC2"
|
||||||
|
period = "120"
|
||||||
|
extended_statistic = "p88.0"
|
||||||
|
threshold = "80"
|
||||||
|
alarm_description = "This metric monitors ec2 cpu utilization"
|
||||||
|
insufficient_data_actions = []
|
||||||
|
dimensions {
|
||||||
|
InstanceId = "i-abc123"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
|
||||||
|
var testAccAWSCloudWatchMetricAlarmConfigMissingStatistic = fmt.Sprintf(`
|
||||||
|
resource "aws_cloudwatch_metric_alarm" "foobar" {
|
||||||
|
alarm_name = "terraform-test-foobar6"
|
||||||
|
comparison_operator = "GreaterThanOrEqualToThreshold"
|
||||||
|
evaluation_periods = "2"
|
||||||
|
metric_name = "CPUUtilization"
|
||||||
|
namespace = "AWS/EC2"
|
||||||
|
period = "120"
|
||||||
|
threshold = "80"
|
||||||
|
alarm_description = "This metric monitors ec2 cpu utilization"
|
||||||
|
insufficient_data_actions = []
|
||||||
|
dimensions {
|
||||||
|
InstanceId = "i-abc123"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
|
|
@ -161,9 +161,6 @@ func testAccCheckCodeCommitRepositoryDestroy(s *terraform.State) error {
|
||||||
|
|
||||||
func testAccCodeCommitRepository_basic(rInt int) string {
|
func testAccCodeCommitRepository_basic(rInt int) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
provider "aws" {
|
|
||||||
region = "us-east-1"
|
|
||||||
}
|
|
||||||
resource "aws_codecommit_repository" "test" {
|
resource "aws_codecommit_repository" "test" {
|
||||||
repository_name = "test_repository_%d"
|
repository_name = "test_repository_%d"
|
||||||
description = "This is a test description"
|
description = "This is a test description"
|
||||||
|
@ -173,9 +170,6 @@ resource "aws_codecommit_repository" "test" {
|
||||||
|
|
||||||
func testAccCodeCommitRepository_withChanges(rInt int) string {
|
func testAccCodeCommitRepository_withChanges(rInt int) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
provider "aws" {
|
|
||||||
region = "us-east-1"
|
|
||||||
}
|
|
||||||
resource "aws_codecommit_repository" "test" {
|
resource "aws_codecommit_repository" "test" {
|
||||||
repository_name = "test_repository_%d"
|
repository_name = "test_repository_%d"
|
||||||
description = "This is a test description - with changes"
|
description = "This is a test description - with changes"
|
||||||
|
@ -185,9 +179,6 @@ resource "aws_codecommit_repository" "test" {
|
||||||
|
|
||||||
func testAccCodeCommitRepository_with_default_branch(rInt int) string {
|
func testAccCodeCommitRepository_with_default_branch(rInt int) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
provider "aws" {
|
|
||||||
region = "us-east-1"
|
|
||||||
}
|
|
||||||
resource "aws_codecommit_repository" "test" {
|
resource "aws_codecommit_repository" "test" {
|
||||||
repository_name = "test_repository_%d"
|
repository_name = "test_repository_%d"
|
||||||
description = "This is a test description"
|
description = "This is a test description"
|
||||||
|
|
|
@ -0,0 +1,152 @@
|
||||||
|
package aws
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
"github.com/aws/aws-sdk-go/service/codedeploy"
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
func resourceAwsCodeDeployDeploymentConfig() *schema.Resource {
|
||||||
|
return &schema.Resource{
|
||||||
|
Create: resourceAwsCodeDeployDeploymentConfigCreate,
|
||||||
|
Read: resourceAwsCodeDeployDeploymentConfigRead,
|
||||||
|
Delete: resourceAwsCodeDeployDeploymentConfigDelete,
|
||||||
|
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"deployment_config_name": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"minimum_healthy_hosts": {
|
||||||
|
Type: schema.TypeList,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
MaxItems: 1,
|
||||||
|
Elem: &schema.Resource{
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"type": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ValidateFunc: validateMinimumHealtyHostsType,
|
||||||
|
},
|
||||||
|
|
||||||
|
"value": {
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
"deployment_config_id": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceAwsCodeDeployDeploymentConfigCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
conn := meta.(*AWSClient).codedeployconn
|
||||||
|
|
||||||
|
input := &codedeploy.CreateDeploymentConfigInput{
|
||||||
|
DeploymentConfigName: aws.String(d.Get("deployment_config_name").(string)),
|
||||||
|
MinimumHealthyHosts: expandAwsCodeDeployConfigMinimumHealthHosts(d),
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := conn.CreateDeploymentConfig(input)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
d.SetId(d.Get("deployment_config_name").(string))
|
||||||
|
|
||||||
|
return resourceAwsCodeDeployDeploymentConfigRead(d, meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceAwsCodeDeployDeploymentConfigRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
conn := meta.(*AWSClient).codedeployconn
|
||||||
|
|
||||||
|
input := &codedeploy.GetDeploymentConfigInput{
|
||||||
|
DeploymentConfigName: aws.String(d.Id()),
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := conn.GetDeploymentConfig(input)
|
||||||
|
if err != nil {
|
||||||
|
if awsErr, ok := err.(awserr.Error); ok {
|
||||||
|
if "DeploymentConfigDoesNotExistException" == awsErr.Code() {
|
||||||
|
log.Printf("[DEBUG] CodeDeploy Deployment Config (%s) not found", d.Id())
|
||||||
|
d.SetId("")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.DeploymentConfigInfo == nil {
|
||||||
|
return fmt.Errorf("[ERROR] Cannot find DeploymentConfig %q", d.Id())
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := d.Set("minimum_healthy_hosts", flattenAwsCodeDeployConfigMinimumHealthHosts(resp.DeploymentConfigInfo.MinimumHealthyHosts)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
d.Set("deployment_config_id", resp.DeploymentConfigInfo.DeploymentConfigId)
|
||||||
|
d.Set("deployment_config_name", resp.DeploymentConfigInfo.DeploymentConfigName)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceAwsCodeDeployDeploymentConfigDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
conn := meta.(*AWSClient).codedeployconn
|
||||||
|
|
||||||
|
input := &codedeploy.DeleteDeploymentConfigInput{
|
||||||
|
DeploymentConfigName: aws.String(d.Id()),
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := conn.DeleteDeploymentConfig(input)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func expandAwsCodeDeployConfigMinimumHealthHosts(d *schema.ResourceData) *codedeploy.MinimumHealthyHosts {
|
||||||
|
hosts := d.Get("minimum_healthy_hosts").([]interface{})
|
||||||
|
host := hosts[0].(map[string]interface{})
|
||||||
|
|
||||||
|
minimumHealthyHost := codedeploy.MinimumHealthyHosts{
|
||||||
|
Type: aws.String(host["type"].(string)),
|
||||||
|
Value: aws.Int64(int64(host["value"].(int))),
|
||||||
|
}
|
||||||
|
|
||||||
|
return &minimumHealthyHost
|
||||||
|
}
|
||||||
|
|
||||||
|
func flattenAwsCodeDeployConfigMinimumHealthHosts(hosts *codedeploy.MinimumHealthyHosts) []map[string]interface{} {
|
||||||
|
result := make([]map[string]interface{}, 0)
|
||||||
|
|
||||||
|
item := make(map[string]interface{})
|
||||||
|
|
||||||
|
item["type"] = *hosts.Type
|
||||||
|
item["value"] = *hosts.Value
|
||||||
|
|
||||||
|
result = append(result, item)
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateMinimumHealtyHostsType(v interface{}, k string) (ws []string, errors []error) {
|
||||||
|
value := v.(string)
|
||||||
|
if value != "FLEET_PERCENT" && value != "HOST_COUNT" {
|
||||||
|
errors = append(errors, fmt.Errorf(
|
||||||
|
"%q must be one of \"FLEET_PERCENT\" or \"HOST_COUNT\"", k))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
|
@ -0,0 +1,177 @@
|
||||||
|
package aws
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
"github.com/aws/aws-sdk-go/service/codedeploy"
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccAWSCodeDeployDeploymentConfig_fleetPercent(t *testing.T) {
|
||||||
|
var config codedeploy.DeploymentConfigInfo
|
||||||
|
|
||||||
|
rName := acctest.RandString(5)
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckAWSCodeDeployDeploymentConfigDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccAWSCodeDeployDeploymentConfigFleet(rName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckAWSCodeDeployDeploymentConfigExists("aws_codedeploy_deployment_config.foo", &config),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"aws_codedeploy_deployment_config.foo", "minimum_healthy_hosts.0.type", "FLEET_PERCENT"),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"aws_codedeploy_deployment_config.foo", "minimum_healthy_hosts.0.value", "75"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccAWSCodeDeployDeploymentConfig_hostCount(t *testing.T) {
|
||||||
|
var config codedeploy.DeploymentConfigInfo
|
||||||
|
|
||||||
|
rName := acctest.RandString(5)
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckAWSCodeDeployDeploymentConfigDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccAWSCodeDeployDeploymentConfigHostCount(rName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckAWSCodeDeployDeploymentConfigExists("aws_codedeploy_deployment_config.foo", &config),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"aws_codedeploy_deployment_config.foo", "minimum_healthy_hosts.0.type", "HOST_COUNT"),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"aws_codedeploy_deployment_config.foo", "minimum_healthy_hosts.0.value", "1"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestValidateAWSCodeDeployMinimumHealthyHostsType(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
Value string
|
||||||
|
ErrCount int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
Value: "FLEET_PERCENT",
|
||||||
|
ErrCount: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: "HOST_COUNT",
|
||||||
|
ErrCount: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: "host_count",
|
||||||
|
ErrCount: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: "hostcount",
|
||||||
|
ErrCount: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: "FleetPercent",
|
||||||
|
ErrCount: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: "Foo",
|
||||||
|
ErrCount: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: "",
|
||||||
|
ErrCount: 1,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range cases {
|
||||||
|
_, errors := validateMinimumHealtyHostsType(tc.Value, "minimum_healthy_hosts_type")
|
||||||
|
if len(errors) != tc.ErrCount {
|
||||||
|
t.Fatalf("Minimum Healthy Hosts validation failed for type %q: %q", tc.Value, errors)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckAWSCodeDeployDeploymentConfigDestroy(s *terraform.State) error {
|
||||||
|
conn := testAccProvider.Meta().(*AWSClient).codedeployconn
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "aws_codedeploy_deployment_config" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := conn.GetDeploymentConfig(&codedeploy.GetDeploymentConfigInput{
|
||||||
|
DeploymentConfigName: aws.String(rs.Primary.ID),
|
||||||
|
})
|
||||||
|
|
||||||
|
if ae, ok := err.(awserr.Error); ok && ae.Code() == "DeploymentConfigDoesNotExistException" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
if resp.DeploymentConfigInfo != nil {
|
||||||
|
return fmt.Errorf("CodeDeploy deployment config still exists:\n%#v", *resp.DeploymentConfigInfo.DeploymentConfigName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckAWSCodeDeployDeploymentConfigExists(name string, config *codedeploy.DeploymentConfigInfo) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[name]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
conn := testAccProvider.Meta().(*AWSClient).codedeployconn
|
||||||
|
|
||||||
|
resp, err := conn.GetDeploymentConfig(&codedeploy.GetDeploymentConfigInput{
|
||||||
|
DeploymentConfigName: aws.String(rs.Primary.ID),
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
*config = *resp.DeploymentConfigInfo
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccAWSCodeDeployDeploymentConfigFleet(rName string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "aws_codedeploy_deployment_config" "foo" {
|
||||||
|
deployment_config_name = "test-deployment-config-%s"
|
||||||
|
minimum_healthy_hosts {
|
||||||
|
type = "FLEET_PERCENT"
|
||||||
|
value = 75
|
||||||
|
}
|
||||||
|
}`, rName)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccAWSCodeDeployDeploymentConfigHostCount(rName string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "aws_codedeploy_deployment_config" "foo" {
|
||||||
|
deployment_config_name = "test-deployment-config-%s"
|
||||||
|
minimum_healthy_hosts {
|
||||||
|
type = "HOST_COUNT"
|
||||||
|
value = 1
|
||||||
|
}
|
||||||
|
}`, rName)
|
||||||
|
}
|
|
@ -123,6 +123,7 @@ func resourceAwsDbInstance() *schema.Resource {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
|
ValidateFunc: validateOnceADayWindowFormat,
|
||||||
},
|
},
|
||||||
|
|
||||||
"iops": {
|
"iops": {
|
||||||
|
@ -147,6 +148,7 @@ func resourceAwsDbInstance() *schema.Resource {
|
||||||
}
|
}
|
||||||
return ""
|
return ""
|
||||||
},
|
},
|
||||||
|
ValidateFunc: validateOnceAWeekWindowFormat,
|
||||||
},
|
},
|
||||||
|
|
||||||
"multi_az": {
|
"multi_az": {
|
||||||
|
@ -941,7 +943,7 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error
|
||||||
req.DBPortNumber = aws.Int64(int64(d.Get("port").(int)))
|
req.DBPortNumber = aws.Int64(int64(d.Get("port").(int)))
|
||||||
requestUpdate = true
|
requestUpdate = true
|
||||||
}
|
}
|
||||||
if d.HasChange("db_subnet_group_name") {
|
if d.HasChange("db_subnet_group_name") && !d.IsNewResource() {
|
||||||
d.SetPartial("db_subnet_group_name")
|
d.SetPartial("db_subnet_group_name")
|
||||||
req.DBSubnetGroupName = aws.String(d.Get("db_subnet_group_name").(string))
|
req.DBSubnetGroupName = aws.String(d.Get("db_subnet_group_name").(string))
|
||||||
requestUpdate = true
|
requestUpdate = true
|
||||||
|
|
|
@ -80,6 +80,27 @@ func resourceAwsEcsTaskDefinition() *schema.Resource {
|
||||||
},
|
},
|
||||||
Set: resourceAwsEcsTaskDefinitionVolumeHash,
|
Set: resourceAwsEcsTaskDefinitionVolumeHash,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"placement_constraints": &schema.Schema{
|
||||||
|
Type: schema.TypeSet,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
MaxItems: 10,
|
||||||
|
Elem: &schema.Resource{
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"type": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
ForceNew: true,
|
||||||
|
Required: true,
|
||||||
|
},
|
||||||
|
"expression": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
ForceNew: true,
|
||||||
|
Required: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -128,6 +149,19 @@ func resourceAwsEcsTaskDefinitionCreate(d *schema.ResourceData, meta interface{}
|
||||||
input.Volumes = volumes
|
input.Volumes = volumes
|
||||||
}
|
}
|
||||||
|
|
||||||
|
constraints := d.Get("placement_constraints").(*schema.Set).List()
|
||||||
|
if len(constraints) > 0 {
|
||||||
|
var pc []*ecs.TaskDefinitionPlacementConstraint
|
||||||
|
for _, raw := range constraints {
|
||||||
|
p := raw.(map[string]interface{})
|
||||||
|
pc = append(pc, &ecs.TaskDefinitionPlacementConstraint{
|
||||||
|
Type: aws.String(p["type"].(string)),
|
||||||
|
Expression: aws.String(p["expression"].(string)),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
input.PlacementConstraints = pc
|
||||||
|
}
|
||||||
|
|
||||||
log.Printf("[DEBUG] Registering ECS task definition: %s", input)
|
log.Printf("[DEBUG] Registering ECS task definition: %s", input)
|
||||||
out, err := conn.RegisterTaskDefinition(&input)
|
out, err := conn.RegisterTaskDefinition(&input)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -167,10 +201,27 @@ func resourceAwsEcsTaskDefinitionRead(d *schema.ResourceData, meta interface{})
|
||||||
d.Set("task_role_arn", taskDefinition.TaskRoleArn)
|
d.Set("task_role_arn", taskDefinition.TaskRoleArn)
|
||||||
d.Set("network_mode", taskDefinition.NetworkMode)
|
d.Set("network_mode", taskDefinition.NetworkMode)
|
||||||
d.Set("volumes", flattenEcsVolumes(taskDefinition.Volumes))
|
d.Set("volumes", flattenEcsVolumes(taskDefinition.Volumes))
|
||||||
|
if err := d.Set("placement_constraints", flattenPlacementConstraints(taskDefinition.PlacementConstraints)); err != nil {
|
||||||
|
log.Printf("[ERR] Error setting placement_constraints for (%s): %s", d.Id(), err)
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func flattenPlacementConstraints(pcs []*ecs.TaskDefinitionPlacementConstraint) []map[string]interface{} {
|
||||||
|
if len(pcs) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
results := make([]map[string]interface{}, 0)
|
||||||
|
for _, pc := range pcs {
|
||||||
|
c := make(map[string]interface{})
|
||||||
|
c["type"] = *pc.Type
|
||||||
|
c["expression"] = *pc.Expression
|
||||||
|
results = append(results, c)
|
||||||
|
}
|
||||||
|
return results
|
||||||
|
}
|
||||||
|
|
||||||
func resourceAwsEcsTaskDefinitionDelete(d *schema.ResourceData, meta interface{}) error {
|
func resourceAwsEcsTaskDefinitionDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
conn := meta.(*AWSClient).ecsconn
|
conn := meta.(*AWSClient).ecsconn
|
||||||
|
|
||||||
|
|
|
@ -11,6 +11,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccAWSEcsTaskDefinition_basic(t *testing.T) {
|
func TestAccAWSEcsTaskDefinition_basic(t *testing.T) {
|
||||||
|
var def ecs.TaskDefinition
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
|
@ -19,13 +20,13 @@ func TestAccAWSEcsTaskDefinition_basic(t *testing.T) {
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccAWSEcsTaskDefinition,
|
Config: testAccAWSEcsTaskDefinition,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.jenkins"),
|
testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.jenkins", &def),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccAWSEcsTaskDefinitionModified,
|
Config: testAccAWSEcsTaskDefinitionModified,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.jenkins"),
|
testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.jenkins", &def),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -34,6 +35,7 @@ func TestAccAWSEcsTaskDefinition_basic(t *testing.T) {
|
||||||
|
|
||||||
// Regression for https://github.com/hashicorp/terraform/issues/2370
|
// Regression for https://github.com/hashicorp/terraform/issues/2370
|
||||||
func TestAccAWSEcsTaskDefinition_withScratchVolume(t *testing.T) {
|
func TestAccAWSEcsTaskDefinition_withScratchVolume(t *testing.T) {
|
||||||
|
var def ecs.TaskDefinition
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
|
@ -42,7 +44,7 @@ func TestAccAWSEcsTaskDefinition_withScratchVolume(t *testing.T) {
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccAWSEcsTaskDefinitionWithScratchVolume,
|
Config: testAccAWSEcsTaskDefinitionWithScratchVolume,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.sleep"),
|
testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.sleep", &def),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -51,6 +53,7 @@ func TestAccAWSEcsTaskDefinition_withScratchVolume(t *testing.T) {
|
||||||
|
|
||||||
// Regression for https://github.com/hashicorp/terraform/issues/2694
|
// Regression for https://github.com/hashicorp/terraform/issues/2694
|
||||||
func TestAccAWSEcsTaskDefinition_withEcsService(t *testing.T) {
|
func TestAccAWSEcsTaskDefinition_withEcsService(t *testing.T) {
|
||||||
|
var def ecs.TaskDefinition
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
|
@ -59,14 +62,14 @@ func TestAccAWSEcsTaskDefinition_withEcsService(t *testing.T) {
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccAWSEcsTaskDefinitionWithEcsService,
|
Config: testAccAWSEcsTaskDefinitionWithEcsService,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.sleep"),
|
testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.sleep", &def),
|
||||||
testAccCheckAWSEcsServiceExists("aws_ecs_service.sleep-svc"),
|
testAccCheckAWSEcsServiceExists("aws_ecs_service.sleep-svc"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccAWSEcsTaskDefinitionWithEcsServiceModified,
|
Config: testAccAWSEcsTaskDefinitionWithEcsServiceModified,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.sleep"),
|
testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.sleep", &def),
|
||||||
testAccCheckAWSEcsServiceExists("aws_ecs_service.sleep-svc"),
|
testAccCheckAWSEcsServiceExists("aws_ecs_service.sleep-svc"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
|
@ -75,6 +78,7 @@ func TestAccAWSEcsTaskDefinition_withEcsService(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccAWSEcsTaskDefinition_withTaskRoleArn(t *testing.T) {
|
func TestAccAWSEcsTaskDefinition_withTaskRoleArn(t *testing.T) {
|
||||||
|
var def ecs.TaskDefinition
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
|
@ -83,7 +87,7 @@ func TestAccAWSEcsTaskDefinition_withTaskRoleArn(t *testing.T) {
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccAWSEcsTaskDefinitionWithTaskRoleArn,
|
Config: testAccAWSEcsTaskDefinitionWithTaskRoleArn,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.sleep"),
|
testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.sleep", &def),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -91,6 +95,7 @@ func TestAccAWSEcsTaskDefinition_withTaskRoleArn(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccAWSEcsTaskDefinition_withNetworkMode(t *testing.T) {
|
func TestAccAWSEcsTaskDefinition_withNetworkMode(t *testing.T) {
|
||||||
|
var def ecs.TaskDefinition
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
|
@ -99,7 +104,7 @@ func TestAccAWSEcsTaskDefinition_withNetworkMode(t *testing.T) {
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccAWSEcsTaskDefinitionWithNetworkMode,
|
Config: testAccAWSEcsTaskDefinitionWithNetworkMode,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.sleep"),
|
testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.sleep", &def),
|
||||||
resource.TestCheckResourceAttr(
|
resource.TestCheckResourceAttr(
|
||||||
"aws_ecs_task_definition.sleep", "network_mode", "bridge"),
|
"aws_ecs_task_definition.sleep", "network_mode", "bridge"),
|
||||||
),
|
),
|
||||||
|
@ -108,6 +113,33 @@ func TestAccAWSEcsTaskDefinition_withNetworkMode(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccAWSEcsTaskDefinition_constraint(t *testing.T) {
|
||||||
|
var def ecs.TaskDefinition
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckAWSEcsTaskDefinitionDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccAWSEcsTaskDefinition_constraint,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.jenkins", &def),
|
||||||
|
resource.TestCheckResourceAttr("aws_ecs_task_definition.jenkins", "placement_constraints.#", "1"),
|
||||||
|
testAccCheckAWSTaskDefinitionConstraintsAttrs(&def),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckAWSTaskDefinitionConstraintsAttrs(def *ecs.TaskDefinition) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
if len(def.PlacementConstraints) != 1 {
|
||||||
|
return fmt.Errorf("Expected (1) placement_constraints, got (%d)", len(def.PlacementConstraints))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
func TestValidateAwsEcsTaskDefinitionNetworkMode(t *testing.T) {
|
func TestValidateAwsEcsTaskDefinitionNetworkMode(t *testing.T) {
|
||||||
validNames := []string{
|
validNames := []string{
|
||||||
"bridge",
|
"bridge",
|
||||||
|
@ -159,17 +191,82 @@ func testAccCheckAWSEcsTaskDefinitionDestroy(s *terraform.State) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckAWSEcsTaskDefinitionExists(name string) resource.TestCheckFunc {
|
func testAccCheckAWSEcsTaskDefinitionExists(name string, def *ecs.TaskDefinition) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
_, ok := s.RootModule().Resources[name]
|
rs, ok := s.RootModule().Resources[name]
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("Not found: %s", name)
|
return fmt.Errorf("Not found: %s", name)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
conn := testAccProvider.Meta().(*AWSClient).ecsconn
|
||||||
|
|
||||||
|
out, err := conn.DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{
|
||||||
|
TaskDefinition: aws.String(rs.Primary.Attributes["arn"]),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
*def = *out.TaskDefinition
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var testAccAWSEcsTaskDefinition_constraint = `
|
||||||
|
resource "aws_ecs_task_definition" "jenkins" {
|
||||||
|
family = "terraform-acc-test"
|
||||||
|
container_definitions = <<TASK_DEFINITION
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"cpu": 10,
|
||||||
|
"command": ["sleep", "10"],
|
||||||
|
"entryPoint": ["/"],
|
||||||
|
"environment": [
|
||||||
|
{"name": "VARNAME", "value": "VARVAL"}
|
||||||
|
],
|
||||||
|
"essential": true,
|
||||||
|
"image": "jenkins",
|
||||||
|
"links": ["mongodb"],
|
||||||
|
"memory": 128,
|
||||||
|
"name": "jenkins",
|
||||||
|
"portMappings": [
|
||||||
|
{
|
||||||
|
"containerPort": 80,
|
||||||
|
"hostPort": 8080
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cpu": 10,
|
||||||
|
"command": ["sleep", "10"],
|
||||||
|
"entryPoint": ["/"],
|
||||||
|
"essential": true,
|
||||||
|
"image": "mongodb",
|
||||||
|
"memory": 128,
|
||||||
|
"name": "mongodb",
|
||||||
|
"portMappings": [
|
||||||
|
{
|
||||||
|
"containerPort": 28017,
|
||||||
|
"hostPort": 28017
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
TASK_DEFINITION
|
||||||
|
|
||||||
|
volume {
|
||||||
|
name = "jenkins-home"
|
||||||
|
host_path = "/ecs/jenkins-home"
|
||||||
|
}
|
||||||
|
|
||||||
|
placement_constraints {
|
||||||
|
type = "memberOf"
|
||||||
|
expression = "attribute:ecs.availability-zone in [us-west-2a, us-west-2b]"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
var testAccAWSEcsTaskDefinition = `
|
var testAccAWSEcsTaskDefinition = `
|
||||||
resource "aws_ecs_task_definition" "jenkins" {
|
resource "aws_ecs_task_definition" "jenkins" {
|
||||||
family = "terraform-acc-test"
|
family = "terraform-acc-test"
|
||||||
|
|
|
@ -200,13 +200,13 @@ func resourceAwsEfsMountTargetRead(d *schema.ResourceData, meta interface{}) err
|
||||||
}
|
}
|
||||||
|
|
||||||
// DNS name per http://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html
|
// DNS name per http://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html
|
||||||
az, err := getAzFromSubnetId(*mt.SubnetId, meta.(*AWSClient).ec2conn)
|
_, err = getAzFromSubnetId(*mt.SubnetId, meta.(*AWSClient).ec2conn)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Failed getting Availability Zone from subnet ID (%s): %s", *mt.SubnetId, err)
|
return fmt.Errorf("Failed getting Availability Zone from subnet ID (%s): %s", *mt.SubnetId, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
region := meta.(*AWSClient).region
|
region := meta.(*AWSClient).region
|
||||||
err = d.Set("dns_name", resourceAwsEfsMountTargetDnsName(az, *mt.FileSystemId, region))
|
err = d.Set("dns_name", resourceAwsEfsMountTargetDnsName(*mt.FileSystemId, region))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -286,8 +286,8 @@ func resourceAwsEfsMountTargetDelete(d *schema.ResourceData, meta interface{}) e
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceAwsEfsMountTargetDnsName(az, fileSystemId, region string) string {
|
func resourceAwsEfsMountTargetDnsName(fileSystemId, region string) string {
|
||||||
return fmt.Sprintf("%s.%s.efs.%s.amazonaws.com", az, fileSystemId, region)
|
return fmt.Sprintf("%s.efs.%s.amazonaws.com", fileSystemId, region)
|
||||||
}
|
}
|
||||||
|
|
||||||
func hasEmptyMountTargets(mto *efs.DescribeMountTargetsOutput) bool {
|
func hasEmptyMountTargets(mto *efs.DescribeMountTargetsOutput) bool {
|
||||||
|
|
|
@ -34,7 +34,7 @@ func TestAccAWSEFSMountTarget_basic(t *testing.T) {
|
||||||
resource.TestMatchResourceAttr(
|
resource.TestMatchResourceAttr(
|
||||||
"aws_efs_mount_target.alpha",
|
"aws_efs_mount_target.alpha",
|
||||||
"dns_name",
|
"dns_name",
|
||||||
regexp.MustCompile("^us-west-2a.[^.]+.efs.us-west-2.amazonaws.com$"),
|
regexp.MustCompile("^[^.]+.efs.us-west-2.amazonaws.com$"),
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
|
@ -48,7 +48,7 @@ func TestAccAWSEFSMountTarget_basic(t *testing.T) {
|
||||||
resource.TestMatchResourceAttr(
|
resource.TestMatchResourceAttr(
|
||||||
"aws_efs_mount_target.alpha",
|
"aws_efs_mount_target.alpha",
|
||||||
"dns_name",
|
"dns_name",
|
||||||
regexp.MustCompile("^us-west-2a.[^.]+.efs.us-west-2.amazonaws.com$"),
|
regexp.MustCompile("^[^.]+.efs.us-west-2.amazonaws.com$"),
|
||||||
),
|
),
|
||||||
testAccCheckEfsMountTarget(
|
testAccCheckEfsMountTarget(
|
||||||
"aws_efs_mount_target.beta",
|
"aws_efs_mount_target.beta",
|
||||||
|
@ -57,7 +57,7 @@ func TestAccAWSEFSMountTarget_basic(t *testing.T) {
|
||||||
resource.TestMatchResourceAttr(
|
resource.TestMatchResourceAttr(
|
||||||
"aws_efs_mount_target.beta",
|
"aws_efs_mount_target.beta",
|
||||||
"dns_name",
|
"dns_name",
|
||||||
regexp.MustCompile("^us-west-2b.[^.]+.efs.us-west-2.amazonaws.com$"),
|
regexp.MustCompile("^[^.]+.efs.us-west-2.amazonaws.com$"),
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
|
@ -91,10 +91,9 @@ func TestAccAWSEFSMountTarget_disappears(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestResourceAWSEFSMountTarget_mountTargetDnsName(t *testing.T) {
|
func TestResourceAWSEFSMountTarget_mountTargetDnsName(t *testing.T) {
|
||||||
actual := resourceAwsEfsMountTargetDnsName("non-existent-1c",
|
actual := resourceAwsEfsMountTargetDnsName("fs-123456ab", "non-existent-1")
|
||||||
"fs-123456ab", "non-existent-1")
|
|
||||||
|
|
||||||
expected := "non-existent-1c.fs-123456ab.efs.non-existent-1.amazonaws.com"
|
expected := "fs-123456ab.efs.non-existent-1.amazonaws.com"
|
||||||
if actual != expected {
|
if actual != expected {
|
||||||
t.Fatalf("Expected EFS mount target DNS name to be %s, got %s",
|
t.Fatalf("Expected EFS mount target DNS name to be %s, got %s",
|
||||||
expected, actual)
|
expected, actual)
|
||||||
|
|
|
@ -80,6 +80,7 @@ func resourceAwsElastiCacheCommonSchema() map[string]*schema.Schema {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
|
ValidateFunc: validateOnceADayWindowFormat,
|
||||||
},
|
},
|
||||||
"snapshot_name": &schema.Schema{
|
"snapshot_name": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
|
@ -96,6 +97,7 @@ func resourceAwsElastiCacheCommonSchema() map[string]*schema.Schema {
|
||||||
// to lowercase
|
// to lowercase
|
||||||
return strings.ToLower(val.(string))
|
return strings.ToLower(val.(string))
|
||||||
},
|
},
|
||||||
|
ValidateFunc: validateOnceAWeekWindowFormat,
|
||||||
},
|
},
|
||||||
"port": &schema.Schema{
|
"port": &schema.Schema{
|
||||||
Type: schema.TypeInt,
|
Type: schema.TypeInt,
|
||||||
|
|
|
@ -115,6 +115,7 @@ func resourceAwsElb() *schema.Resource {
|
||||||
"access_logs": &schema.Schema{
|
"access_logs": &schema.Schema{
|
||||||
Type: schema.TypeList,
|
Type: schema.TypeList,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
|
MaxItems: 1,
|
||||||
Elem: &schema.Resource{
|
Elem: &schema.Resource{
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"interval": &schema.Schema{
|
"interval": &schema.Schema{
|
||||||
|
@ -392,7 +393,26 @@ func resourceAwsElbRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
d.Set("connection_draining_timeout", lbAttrs.ConnectionDraining.Timeout)
|
d.Set("connection_draining_timeout", lbAttrs.ConnectionDraining.Timeout)
|
||||||
d.Set("cross_zone_load_balancing", lbAttrs.CrossZoneLoadBalancing.Enabled)
|
d.Set("cross_zone_load_balancing", lbAttrs.CrossZoneLoadBalancing.Enabled)
|
||||||
if lbAttrs.AccessLog != nil {
|
if lbAttrs.AccessLog != nil {
|
||||||
if err := d.Set("access_logs", flattenAccessLog(lbAttrs.AccessLog)); err != nil {
|
// The AWS API does not allow users to remove access_logs, only disable them.
|
||||||
|
// During creation of the ELB, Terraform sets the access_logs to disabled,
|
||||||
|
// so there should not be a case where lbAttrs.AccessLog above is nil.
|
||||||
|
|
||||||
|
// Here we do not record the remove value of access_log if:
|
||||||
|
// - there is no access_log block in the configuration
|
||||||
|
// - the remote access_logs are disabled
|
||||||
|
//
|
||||||
|
// This indicates there is no access_log in the configuration.
|
||||||
|
// - externally added access_logs will be enabled, so we'll detect the drift
|
||||||
|
// - locally added access_logs will be in the config, so we'll add to the
|
||||||
|
// API/state
|
||||||
|
// See https://github.com/hashicorp/terraform/issues/10138
|
||||||
|
_, n := d.GetChange("access_logs")
|
||||||
|
elbal := lbAttrs.AccessLog
|
||||||
|
nl := n.([]interface{})
|
||||||
|
if len(nl) == 0 && !*elbal.Enabled {
|
||||||
|
elbal = nil
|
||||||
|
}
|
||||||
|
if err := d.Set("access_logs", flattenAccessLog(elbal)); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -533,18 +553,16 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
logs := d.Get("access_logs").([]interface{})
|
logs := d.Get("access_logs").([]interface{})
|
||||||
if len(logs) > 1 {
|
if len(logs) == 1 {
|
||||||
return fmt.Errorf("Only one access logs config per ELB is supported")
|
l := logs[0].(map[string]interface{})
|
||||||
} else if len(logs) == 1 {
|
|
||||||
log := logs[0].(map[string]interface{})
|
|
||||||
accessLog := &elb.AccessLog{
|
accessLog := &elb.AccessLog{
|
||||||
Enabled: aws.Bool(log["enabled"].(bool)),
|
Enabled: aws.Bool(l["enabled"].(bool)),
|
||||||
EmitInterval: aws.Int64(int64(log["interval"].(int))),
|
EmitInterval: aws.Int64(int64(l["interval"].(int))),
|
||||||
S3BucketName: aws.String(log["bucket"].(string)),
|
S3BucketName: aws.String(l["bucket"].(string)),
|
||||||
}
|
}
|
||||||
|
|
||||||
if log["bucket_prefix"] != "" {
|
if l["bucket_prefix"] != "" {
|
||||||
accessLog.S3BucketPrefix = aws.String(log["bucket_prefix"].(string))
|
accessLog.S3BucketPrefix = aws.String(l["bucket_prefix"].(string))
|
||||||
}
|
}
|
||||||
|
|
||||||
attrs.LoadBalancerAttributes.AccessLog = accessLog
|
attrs.LoadBalancerAttributes.AccessLog = accessLog
|
||||||
|
|
|
@ -82,9 +82,11 @@ func TestAccAWSELB_fullCharacterRange(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccAWSELB_AccessLogs(t *testing.T) {
|
func TestAccAWSELB_AccessLogs_enabled(t *testing.T) {
|
||||||
var conf elb.LoadBalancerDescription
|
var conf elb.LoadBalancerDescription
|
||||||
|
|
||||||
|
rName := fmt.Sprintf("terraform-access-logs-bucket-%d", acctest.RandInt())
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
IDRefreshName: "aws_elb.foo",
|
IDRefreshName: "aws_elb.foo",
|
||||||
|
@ -99,15 +101,62 @@ func TestAccAWSELB_AccessLogs(t *testing.T) {
|
||||||
},
|
},
|
||||||
|
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccAWSELBAccessLogsOn,
|
Config: testAccAWSELBAccessLogsOn(rName),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
||||||
resource.TestCheckResourceAttr(
|
resource.TestCheckResourceAttr(
|
||||||
"aws_elb.foo", "access_logs.#", "1"),
|
"aws_elb.foo", "access_logs.#", "1"),
|
||||||
resource.TestCheckResourceAttr(
|
resource.TestCheckResourceAttr(
|
||||||
"aws_elb.foo", "access_logs.0.bucket", "terraform-access-logs-bucket"),
|
"aws_elb.foo", "access_logs.0.bucket", rName),
|
||||||
resource.TestCheckResourceAttr(
|
resource.TestCheckResourceAttr(
|
||||||
"aws_elb.foo", "access_logs.0.interval", "5"),
|
"aws_elb.foo", "access_logs.0.interval", "5"),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"aws_elb.foo", "access_logs.0.enabled", "true"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccAWSELBAccessLogs,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"aws_elb.foo", "access_logs.#", "0"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccAWSELB_AccessLogs_disabled(t *testing.T) {
|
||||||
|
var conf elb.LoadBalancerDescription
|
||||||
|
|
||||||
|
rName := fmt.Sprintf("terraform-access-logs-bucket-%d", acctest.RandInt())
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
IDRefreshName: "aws_elb.foo",
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccAWSELBAccessLogs,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccAWSELBAccessLogsDisabled(rName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"aws_elb.foo", "access_logs.#", "1"),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"aws_elb.foo", "access_logs.0.bucket", rName),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"aws_elb.foo", "access_logs.0.interval", "5"),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"aws_elb.foo", "access_logs.0.enabled", "false"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
|
|
||||||
|
@ -995,12 +1044,14 @@ resource "aws_elb" "foo" {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
`
|
`
|
||||||
const testAccAWSELBAccessLogsOn = `
|
|
||||||
|
func testAccAWSELBAccessLogsOn(r string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
# an S3 bucket configured for Access logs
|
# an S3 bucket configured for Access logs
|
||||||
# The 797873946194 is the AWS ID for us-west-2, so this test
|
# The 797873946194 is the AWS ID for us-west-2, so this test
|
||||||
# must be ran in us-west-2
|
# must be ran in us-west-2
|
||||||
resource "aws_s3_bucket" "acceslogs_bucket" {
|
resource "aws_s3_bucket" "acceslogs_bucket" {
|
||||||
bucket = "terraform-access-logs-bucket"
|
bucket = "%s"
|
||||||
acl = "private"
|
acl = "private"
|
||||||
force_destroy = true
|
force_destroy = true
|
||||||
policy = <<EOF
|
policy = <<EOF
|
||||||
|
@ -1013,7 +1064,7 @@ resource "aws_s3_bucket" "acceslogs_bucket" {
|
||||||
"Principal": {
|
"Principal": {
|
||||||
"AWS": "arn:aws:iam::797873946194:root"
|
"AWS": "arn:aws:iam::797873946194:root"
|
||||||
},
|
},
|
||||||
"Resource": "arn:aws:s3:::terraform-access-logs-bucket/*",
|
"Resource": "arn:aws:s3:::%s/*",
|
||||||
"Sid": "Stmt1446575236270"
|
"Sid": "Stmt1446575236270"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
|
@ -1037,7 +1088,55 @@ resource "aws_elb" "foo" {
|
||||||
bucket = "${aws_s3_bucket.acceslogs_bucket.bucket}"
|
bucket = "${aws_s3_bucket.acceslogs_bucket.bucket}"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
`
|
`, r, r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccAWSELBAccessLogsDisabled(r string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
# an S3 bucket configured for Access logs
|
||||||
|
# The 797873946194 is the AWS ID for us-west-2, so this test
|
||||||
|
# must be ran in us-west-2
|
||||||
|
resource "aws_s3_bucket" "acceslogs_bucket" {
|
||||||
|
bucket = "%s"
|
||||||
|
acl = "private"
|
||||||
|
force_destroy = true
|
||||||
|
policy = <<EOF
|
||||||
|
{
|
||||||
|
"Id": "Policy1446577137248",
|
||||||
|
"Statement": [
|
||||||
|
{
|
||||||
|
"Action": "s3:PutObject",
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Principal": {
|
||||||
|
"AWS": "arn:aws:iam::797873946194:root"
|
||||||
|
},
|
||||||
|
"Resource": "arn:aws:s3:::%s/*",
|
||||||
|
"Sid": "Stmt1446575236270"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"Version": "2012-10-17"
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "aws_elb" "foo" {
|
||||||
|
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
|
||||||
|
|
||||||
|
listener {
|
||||||
|
instance_port = 8000
|
||||||
|
instance_protocol = "http"
|
||||||
|
lb_port = 80
|
||||||
|
lb_protocol = "http"
|
||||||
|
}
|
||||||
|
|
||||||
|
access_logs {
|
||||||
|
interval = 5
|
||||||
|
bucket = "${aws_s3_bucket.acceslogs_bucket.bucket}"
|
||||||
|
enabled = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`, r, r)
|
||||||
|
}
|
||||||
|
|
||||||
const testAccAWSELBGeneratedName = `
|
const testAccAWSELBGeneratedName = `
|
||||||
resource "aws_elb" "foo" {
|
resource "aws_elb" "foo" {
|
||||||
|
|
|
@ -90,7 +90,7 @@ func TestAccAWSUserLoginProfile_notAKey(t *testing.T) {
|
||||||
{
|
{
|
||||||
// We own this account but it doesn't have any key associated with it
|
// We own this account but it doesn't have any key associated with it
|
||||||
Config: testAccAWSUserLoginProfileConfig(username, "/", "lolimnotakey"),
|
Config: testAccAWSUserLoginProfileConfig(username, "/", "lolimnotakey"),
|
||||||
ExpectError: regexp.MustCompile(`Error encrypting password`),
|
ExpectError: regexp.MustCompile(`Error encrypting Password`),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
@ -241,7 +241,9 @@ resource "aws_iam_access_key" "user" {
|
||||||
|
|
||||||
resource "aws_iam_user_login_profile" "user" {
|
resource "aws_iam_user_login_profile" "user" {
|
||||||
user = "${aws_iam_user.user.name}"
|
user = "${aws_iam_user.user.name}"
|
||||||
pgp_key = "%s"
|
pgp_key = <<EOF
|
||||||
|
%s
|
||||||
|
EOF
|
||||||
}
|
}
|
||||||
`, r, p, key)
|
`, r, p, key)
|
||||||
}
|
}
|
||||||
|
|
|
@ -75,8 +75,10 @@ func resourceAwsKeyPairCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
keyName = v.(string)
|
keyName = v.(string)
|
||||||
} else if v, ok := d.GetOk("key_name_prefix"); ok {
|
} else if v, ok := d.GetOk("key_name_prefix"); ok {
|
||||||
keyName = resource.PrefixedUniqueId(v.(string))
|
keyName = resource.PrefixedUniqueId(v.(string))
|
||||||
|
d.Set("key_name", keyName)
|
||||||
} else {
|
} else {
|
||||||
keyName = resource.UniqueId()
|
keyName = resource.UniqueId()
|
||||||
|
d.Set("key_name", keyName)
|
||||||
}
|
}
|
||||||
|
|
||||||
publicKey := d.Get("public_key").(string)
|
publicKey := d.Get("public_key").(string)
|
||||||
|
|
|
@ -83,43 +83,6 @@ func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|
||||||
// elements removed in v0.7.0
|
|
||||||
"role_arn": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Optional: true,
|
|
||||||
Removed: "role_arn has been removed. Use a s3_configuration block instead. See https://terraform.io/docs/providers/aws/r/kinesis_firehose_delivery_stream.html",
|
|
||||||
},
|
|
||||||
|
|
||||||
"s3_bucket_arn": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Optional: true,
|
|
||||||
Removed: "s3_bucket_arn has been removed. Use a s3_configuration block instead. See https://terraform.io/docs/providers/aws/r/kinesis_firehose_delivery_stream.html",
|
|
||||||
},
|
|
||||||
|
|
||||||
"s3_prefix": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Optional: true,
|
|
||||||
Removed: "s3_prefix has been removed. Use a s3_configuration block instead. See https://terraform.io/docs/providers/aws/r/kinesis_firehose_delivery_stream.html",
|
|
||||||
},
|
|
||||||
|
|
||||||
"s3_buffer_size": {
|
|
||||||
Type: schema.TypeInt,
|
|
||||||
Optional: true,
|
|
||||||
Removed: "s3_buffer_size has been removed. Use a s3_configuration block instead. See https://terraform.io/docs/providers/aws/r/kinesis_firehose_delivery_stream.html",
|
|
||||||
},
|
|
||||||
|
|
||||||
"s3_buffer_interval": {
|
|
||||||
Type: schema.TypeInt,
|
|
||||||
Optional: true,
|
|
||||||
Removed: "s3_buffer_interval has been removed. Use a s3_configuration block instead. See https://terraform.io/docs/providers/aws/r/kinesis_firehose_delivery_stream.html",
|
|
||||||
},
|
|
||||||
|
|
||||||
"s3_data_compression": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Optional: true,
|
|
||||||
Removed: "s3_data_compression has been removed. Use a s3_configuration block instead. See https://terraform.io/docs/providers/aws/r/kinesis_firehose_delivery_stream.html",
|
|
||||||
},
|
|
||||||
|
|
||||||
"s3_configuration": {
|
"s3_configuration": {
|
||||||
Type: schema.TypeList,
|
Type: schema.TypeList,
|
||||||
Required: true,
|
Required: true,
|
||||||
|
|
|
@ -148,6 +148,7 @@ resource "aws_lambda_function" "lambda_function_test_create" {
|
||||||
function_name = "example_lambda_name_create"
|
function_name = "example_lambda_name_create"
|
||||||
role = "${aws_iam_role.iam_for_lambda.arn}"
|
role = "${aws_iam_role.iam_for_lambda.arn}"
|
||||||
handler = "exports.example"
|
handler = "exports.example"
|
||||||
|
runtime = "nodejs4.3"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_lambda_alias" "lambda_alias_test" {
|
resource "aws_lambda_alias" "lambda_alias_test" {
|
||||||
|
|
|
@ -231,6 +231,7 @@ resource "aws_lambda_function" "lambda_function_test_create" {
|
||||||
function_name = "example_lambda_name_create"
|
function_name = "example_lambda_name_create"
|
||||||
role = "${aws_iam_role.iam_for_lambda.arn}"
|
role = "${aws_iam_role.iam_for_lambda.arn}"
|
||||||
handler = "exports.example"
|
handler = "exports.example"
|
||||||
|
runtime = "nodejs4.3"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_lambda_function" "lambda_function_test_update" {
|
resource "aws_lambda_function" "lambda_function_test_update" {
|
||||||
|
@ -315,6 +316,7 @@ resource "aws_lambda_function" "lambda_function_test_create" {
|
||||||
function_name = "example_lambda_name_create"
|
function_name = "example_lambda_name_create"
|
||||||
role = "${aws_iam_role.iam_for_lambda.arn}"
|
role = "${aws_iam_role.iam_for_lambda.arn}"
|
||||||
handler = "exports.example"
|
handler = "exports.example"
|
||||||
|
runtime = "nodejs4.3"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_lambda_function" "lambda_function_test_update" {
|
resource "aws_lambda_function" "lambda_function_test_update" {
|
||||||
|
|
|
@ -460,6 +460,7 @@ resource "aws_lambda_function" "test_lambda" {
|
||||||
function_name = "lambda_function_name_perm"
|
function_name = "lambda_function_name_perm"
|
||||||
role = "${aws_iam_role.iam_for_lambda.arn}"
|
role = "${aws_iam_role.iam_for_lambda.arn}"
|
||||||
handler = "exports.handler"
|
handler = "exports.handler"
|
||||||
|
runtime = "nodejs4.3"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_iam_role" "iam_for_lambda" {
|
resource "aws_iam_role" "iam_for_lambda" {
|
||||||
|
@ -495,6 +496,7 @@ resource "aws_lambda_function" "test_lambda" {
|
||||||
function_name = "lambda_function_name_perm_raw_func_name"
|
function_name = "lambda_function_name_perm_raw_func_name"
|
||||||
role = "${aws_iam_role.iam_for_lambda.arn}"
|
role = "${aws_iam_role.iam_for_lambda.arn}"
|
||||||
handler = "exports.handler"
|
handler = "exports.handler"
|
||||||
|
runtime = "nodejs4.3"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_iam_role" "iam_for_lambda" {
|
resource "aws_iam_role" "iam_for_lambda" {
|
||||||
|
@ -540,6 +542,7 @@ resource "aws_lambda_function" "test_lambda" {
|
||||||
function_name = "lambda_function_name_perm_qualifier"
|
function_name = "lambda_function_name_perm_qualifier"
|
||||||
role = "${aws_iam_role.iam_for_lambda.arn}"
|
role = "${aws_iam_role.iam_for_lambda.arn}"
|
||||||
handler = "exports.handler"
|
handler = "exports.handler"
|
||||||
|
runtime = "nodejs4.3"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_iam_role" "iam_for_lambda" {
|
resource "aws_iam_role" "iam_for_lambda" {
|
||||||
|
@ -583,6 +586,7 @@ resource "aws_lambda_function" "test_lambda" {
|
||||||
function_name = "lambda_function_name_perm_multiperms"
|
function_name = "lambda_function_name_perm_multiperms"
|
||||||
role = "${aws_iam_role.iam_for_lambda.arn}"
|
role = "${aws_iam_role.iam_for_lambda.arn}"
|
||||||
handler = "exports.handler"
|
handler = "exports.handler"
|
||||||
|
runtime = "nodejs4.3"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_iam_role" "iam_for_lambda" {
|
resource "aws_iam_role" "iam_for_lambda" {
|
||||||
|
@ -635,6 +639,7 @@ resource "aws_lambda_function" "my-func" {
|
||||||
function_name = "lambda_function_name_perm_s3"
|
function_name = "lambda_function_name_perm_s3"
|
||||||
role = "${aws_iam_role.police.arn}"
|
role = "${aws_iam_role.police.arn}"
|
||||||
handler = "exports.handler"
|
handler = "exports.handler"
|
||||||
|
runtime = "nodejs4.3"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_iam_role" "police" {
|
resource "aws_iam_role" "police" {
|
||||||
|
@ -681,6 +686,7 @@ resource "aws_lambda_function" "my-func" {
|
||||||
function_name = "lambda_function_name_perm_sns"
|
function_name = "lambda_function_name_perm_sns"
|
||||||
role = "${aws_iam_role.police.arn}"
|
role = "${aws_iam_role.police.arn}"
|
||||||
handler = "exports.handler"
|
handler = "exports.handler"
|
||||||
|
runtime = "nodejs4.3"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_iam_role" "police" {
|
resource "aws_iam_role" "police" {
|
||||||
|
|
|
@ -9,34 +9,38 @@ import (
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
"github.com/aws/aws-sdk-go/service/elb"
|
"github.com/aws/aws-sdk-go/service/elb"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
"github.com/hashicorp/terraform/terraform"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccAWSLoadBalancerListenerPolicy_basic(t *testing.T) {
|
func TestAccAWSLoadBalancerListenerPolicy_basic(t *testing.T) {
|
||||||
|
rChar := acctest.RandStringFromCharSet(6, acctest.CharSetAlpha)
|
||||||
|
lbName := fmt.Sprintf("%s", rChar)
|
||||||
|
mcName := fmt.Sprintf("%s", rChar)
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testAccCheckAWSLoadBalancerListenerPolicyDestroy,
|
CheckDestroy: testAccCheckAWSLoadBalancerListenerPolicyDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccAWSLoadBalancerListenerPolicyConfig_basic0,
|
Config: testAccAWSLoadBalancerListenerPolicyConfig_basic0(lbName, mcName),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.magic-cookie-sticky"),
|
testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.magic-cookie-sticky"),
|
||||||
testAccCheckAWSLoadBalancerListenerPolicyState("test-aws-policies-lb", int64(80), "magic-cookie-sticky-policy", true),
|
testAccCheckAWSLoadBalancerListenerPolicyState(lbName, int64(80), mcName, true),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccAWSLoadBalancerListenerPolicyConfig_basic1,
|
Config: testAccAWSLoadBalancerListenerPolicyConfig_basic1(lbName, mcName),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.magic-cookie-sticky"),
|
testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.magic-cookie-sticky"),
|
||||||
testAccCheckAWSLoadBalancerListenerPolicyState("test-aws-policies-lb", int64(80), "magic-cookie-sticky-policy", true),
|
testAccCheckAWSLoadBalancerListenerPolicyState(lbName, int64(80), mcName, true),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccAWSLoadBalancerListenerPolicyConfig_basic2,
|
Config: testAccAWSLoadBalancerListenerPolicyConfig_basic2(lbName),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckAWSLoadBalancerListenerPolicyState("test-aws-policies-lb", int64(80), "magic-cookie-sticky-policy", false),
|
testAccCheckAWSLoadBalancerListenerPolicyState(lbName, int64(80), mcName, false),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -142,9 +146,10 @@ func testAccCheckAWSLoadBalancerListenerPolicyState(loadBalancerName string, loa
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const testAccAWSLoadBalancerListenerPolicyConfig_basic0 = `
|
func testAccAWSLoadBalancerListenerPolicyConfig_basic0(lbName, mcName string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
resource "aws_elb" "test-lb" {
|
resource "aws_elb" "test-lb" {
|
||||||
name = "test-aws-policies-lb"
|
name = "%s"
|
||||||
availability_zones = ["us-west-2a"]
|
availability_zones = ["us-west-2a"]
|
||||||
|
|
||||||
listener {
|
listener {
|
||||||
|
@ -161,7 +166,7 @@ resource "aws_elb" "test-lb" {
|
||||||
|
|
||||||
resource "aws_load_balancer_policy" "magic-cookie-sticky" {
|
resource "aws_load_balancer_policy" "magic-cookie-sticky" {
|
||||||
load_balancer_name = "${aws_elb.test-lb.name}"
|
load_balancer_name = "${aws_elb.test-lb.name}"
|
||||||
policy_name = "magic-cookie-sticky-policy"
|
policy_name = "%s"
|
||||||
policy_type_name = "AppCookieStickinessPolicyType"
|
policy_type_name = "AppCookieStickinessPolicyType"
|
||||||
policy_attribute = {
|
policy_attribute = {
|
||||||
name = "CookieName"
|
name = "CookieName"
|
||||||
|
@ -175,12 +180,13 @@ resource "aws_load_balancer_listener_policy" "test-lb-listener-policies-80" {
|
||||||
policy_names = [
|
policy_names = [
|
||||||
"${aws_load_balancer_policy.magic-cookie-sticky.policy_name}",
|
"${aws_load_balancer_policy.magic-cookie-sticky.policy_name}",
|
||||||
]
|
]
|
||||||
|
}`, lbName, mcName)
|
||||||
}
|
}
|
||||||
`
|
|
||||||
|
|
||||||
const testAccAWSLoadBalancerListenerPolicyConfig_basic1 = `
|
func testAccAWSLoadBalancerListenerPolicyConfig_basic1(lbName, mcName string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
resource "aws_elb" "test-lb" {
|
resource "aws_elb" "test-lb" {
|
||||||
name = "test-aws-policies-lb"
|
name = "%s"
|
||||||
availability_zones = ["us-west-2a"]
|
availability_zones = ["us-west-2a"]
|
||||||
|
|
||||||
listener {
|
listener {
|
||||||
|
@ -197,7 +203,7 @@ resource "aws_elb" "test-lb" {
|
||||||
|
|
||||||
resource "aws_load_balancer_policy" "magic-cookie-sticky" {
|
resource "aws_load_balancer_policy" "magic-cookie-sticky" {
|
||||||
load_balancer_name = "${aws_elb.test-lb.name}"
|
load_balancer_name = "${aws_elb.test-lb.name}"
|
||||||
policy_name = "magic-cookie-sticky-policy"
|
policy_name = "%s"
|
||||||
policy_type_name = "AppCookieStickinessPolicyType"
|
policy_type_name = "AppCookieStickinessPolicyType"
|
||||||
policy_attribute = {
|
policy_attribute = {
|
||||||
name = "CookieName"
|
name = "CookieName"
|
||||||
|
@ -211,12 +217,13 @@ resource "aws_load_balancer_listener_policy" "test-lb-listener-policies-80" {
|
||||||
policy_names = [
|
policy_names = [
|
||||||
"${aws_load_balancer_policy.magic-cookie-sticky.policy_name}"
|
"${aws_load_balancer_policy.magic-cookie-sticky.policy_name}"
|
||||||
]
|
]
|
||||||
|
}`, lbName, mcName)
|
||||||
}
|
}
|
||||||
`
|
|
||||||
|
|
||||||
const testAccAWSLoadBalancerListenerPolicyConfig_basic2 = `
|
func testAccAWSLoadBalancerListenerPolicyConfig_basic2(lbName string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
resource "aws_elb" "test-lb" {
|
resource "aws_elb" "test-lb" {
|
||||||
name = "test-aws-policies-lb"
|
name = "%s"
|
||||||
availability_zones = ["us-west-2a"]
|
availability_zones = ["us-west-2a"]
|
||||||
|
|
||||||
listener {
|
listener {
|
||||||
|
@ -229,5 +236,5 @@ resource "aws_elb" "test-lb" {
|
||||||
tags {
|
tags {
|
||||||
Name = "tf-acc-test"
|
Name = "tf-acc-test"
|
||||||
}
|
}
|
||||||
|
}`, lbName)
|
||||||
}
|
}
|
||||||
`
|
|
||||||
|
|
|
@ -163,6 +163,7 @@ func resourceAwsRDSCluster() *schema.Resource {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
|
ValidateFunc: validateOnceADayWindowFormat,
|
||||||
},
|
},
|
||||||
|
|
||||||
"preferred_maintenance_window": {
|
"preferred_maintenance_window": {
|
||||||
|
@ -175,6 +176,7 @@ func resourceAwsRDSCluster() *schema.Resource {
|
||||||
}
|
}
|
||||||
return strings.ToLower(val.(string))
|
return strings.ToLower(val.(string))
|
||||||
},
|
},
|
||||||
|
ValidateFunc: validateOnceAWeekWindowFormat,
|
||||||
},
|
},
|
||||||
|
|
||||||
"backup_retention_period": {
|
"backup_retention_period": {
|
||||||
|
|
|
@ -101,6 +101,7 @@ func resourceAwsRedshiftCluster() *schema.Resource {
|
||||||
}
|
}
|
||||||
return strings.ToLower(val.(string))
|
return strings.ToLower(val.(string))
|
||||||
},
|
},
|
||||||
|
ValidateFunc: validateOnceAWeekWindowFormat,
|
||||||
},
|
},
|
||||||
|
|
||||||
"cluster_parameter_group_name": {
|
"cluster_parameter_group_name": {
|
||||||
|
@ -416,7 +417,7 @@ func resourceAwsRedshiftClusterCreate(d *schema.ResourceData, meta interface{})
|
||||||
Pending: []string{"creating", "backing-up", "modifying", "restoring"},
|
Pending: []string{"creating", "backing-up", "modifying", "restoring"},
|
||||||
Target: []string{"available"},
|
Target: []string{"available"},
|
||||||
Refresh: resourceAwsRedshiftClusterStateRefreshFunc(d, meta),
|
Refresh: resourceAwsRedshiftClusterStateRefreshFunc(d, meta),
|
||||||
Timeout: 40 * time.Minute,
|
Timeout: 75 * time.Minute,
|
||||||
MinTimeout: 10 * time.Second,
|
MinTimeout: 10 * time.Second,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -594,8 +595,8 @@ func resourceAwsRedshiftClusterUpdate(d *schema.ResourceData, meta interface{})
|
||||||
requestUpdate = true
|
requestUpdate = true
|
||||||
}
|
}
|
||||||
|
|
||||||
if d.HasChange("vpc_security_group_ips") {
|
if d.HasChange("vpc_security_group_ids") {
|
||||||
req.VpcSecurityGroupIds = expandStringList(d.Get("vpc_security_group_ips").(*schema.Set).List())
|
req.VpcSecurityGroupIds = expandStringList(d.Get("vpc_security_group_ids").(*schema.Set).List())
|
||||||
requestUpdate = true
|
requestUpdate = true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -27,7 +27,9 @@ func resourceAwsRoute53Record() *schema.Resource {
|
||||||
Read: resourceAwsRoute53RecordRead,
|
Read: resourceAwsRoute53RecordRead,
|
||||||
Update: resourceAwsRoute53RecordUpdate,
|
Update: resourceAwsRoute53RecordUpdate,
|
||||||
Delete: resourceAwsRoute53RecordDelete,
|
Delete: resourceAwsRoute53RecordDelete,
|
||||||
|
Importer: &schema.ResourceImporter{
|
||||||
|
State: schema.ImportStatePassthrough,
|
||||||
|
},
|
||||||
SchemaVersion: 2,
|
SchemaVersion: 2,
|
||||||
MigrateState: resourceAwsRoute53RecordMigrateState,
|
MigrateState: resourceAwsRoute53RecordMigrateState,
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
|
@ -50,6 +52,7 @@ func resourceAwsRoute53Record() *schema.Resource {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
|
ValidateFunc: validateRoute53RecordType,
|
||||||
},
|
},
|
||||||
|
|
||||||
"zone_id": &schema.Schema{
|
"zone_id": &schema.Schema{
|
||||||
|
|
|
@ -6,6 +6,8 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
@ -408,6 +410,10 @@ func resourceAwsS3BucketCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := validateS3BucketName(bucket, awsRegion); err != nil {
|
||||||
|
return fmt.Errorf("Error validating S3 bucket name: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
err := resource.Retry(5*time.Minute, func() *resource.RetryError {
|
err := resource.Retry(5*time.Minute, func() *resource.RetryError {
|
||||||
log.Printf("[DEBUG] Trying to create new S3 bucket: %q", bucket)
|
log.Printf("[DEBUG] Trying to create new S3 bucket: %q", bucket)
|
||||||
_, err := s3conn.CreateBucket(req)
|
_, err := s3conn.CreateBucket(req)
|
||||||
|
@ -1728,6 +1734,40 @@ func validateS3BucketRequestPayerType(v interface{}, k string) (ws []string, err
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// validateS3BucketName validates any S3 bucket name that is not inside the us-east-1 region.
|
||||||
|
// Buckets outside of this region have to be DNS-compliant. After the same restrictions are
|
||||||
|
// applied to buckets in the us-east-1 region, this function can be refactored as a SchemaValidateFunc
|
||||||
|
func validateS3BucketName(value string, region string) error {
|
||||||
|
if region != "us-east-1" {
|
||||||
|
if (len(value) < 3) || (len(value) > 63) {
|
||||||
|
return fmt.Errorf("%q must contain from 3 to 63 characters", value)
|
||||||
|
}
|
||||||
|
if !regexp.MustCompile(`^[0-9a-z-.]+$`).MatchString(value) {
|
||||||
|
return fmt.Errorf("only lowercase alphanumeric characters and hyphens allowed in %q", value)
|
||||||
|
}
|
||||||
|
if regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`).MatchString(value) {
|
||||||
|
return fmt.Errorf("%q must not be formatted as an IP address", value)
|
||||||
|
}
|
||||||
|
if strings.HasPrefix(value, `.`) {
|
||||||
|
return fmt.Errorf("%q cannot start with a period", value)
|
||||||
|
}
|
||||||
|
if strings.HasSuffix(value, `.`) {
|
||||||
|
return fmt.Errorf("%q cannot end with a period", value)
|
||||||
|
}
|
||||||
|
if strings.Contains(value, `..`) {
|
||||||
|
return fmt.Errorf("%q can be only one period between labels", value)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if len(value) > 255 {
|
||||||
|
return fmt.Errorf("%q must contain less than 256 characters", value)
|
||||||
|
}
|
||||||
|
if !regexp.MustCompile(`^[0-9a-zA-Z-._]+$`).MatchString(value) {
|
||||||
|
return fmt.Errorf("only alphanumeric characters, hyphens, periods, and underscores allowed in %q", value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func expirationHash(v interface{}) int {
|
func expirationHash(v interface{}) int {
|
||||||
var buf bytes.Buffer
|
var buf bytes.Buffer
|
||||||
m := v.(map[string]interface{})
|
m := v.(map[string]interface{})
|
||||||
|
|
|
@ -455,6 +455,7 @@ resource "aws_lambda_function" "func" {
|
||||||
function_name = "example_lambda_name_%d"
|
function_name = "example_lambda_name_%d"
|
||||||
role = "${aws_iam_role.iam_for_lambda.arn}"
|
role = "${aws_iam_role.iam_for_lambda.arn}"
|
||||||
handler = "exports.example"
|
handler = "exports.example"
|
||||||
|
runtime = "nodejs4.3"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_s3_bucket" "bucket" {
|
resource "aws_s3_bucket" "bucket" {
|
||||||
|
|
|
@ -12,6 +12,8 @@ import (
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
"github.com/aws/aws-sdk-go/service/s3"
|
"github.com/aws/aws-sdk-go/service/s3"
|
||||||
|
@ -690,6 +692,68 @@ func TestAccAWSS3Bucket_ReplicationExpectVersioningValidationError(t *testing.T)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAWSS3BucketName(t *testing.T) {
|
||||||
|
validDnsNames := []string{
|
||||||
|
"foobar",
|
||||||
|
"foo.bar",
|
||||||
|
"foo.bar.baz",
|
||||||
|
"1234",
|
||||||
|
"foo-bar",
|
||||||
|
strings.Repeat("x", 63),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range validDnsNames {
|
||||||
|
if err := validateS3BucketName(v, "us-west-2"); err != nil {
|
||||||
|
t.Fatalf("%q should be a valid S3 bucket name", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
invalidDnsNames := []string{
|
||||||
|
"foo..bar",
|
||||||
|
"Foo.Bar",
|
||||||
|
"192.168.0.1",
|
||||||
|
"127.0.0.1",
|
||||||
|
".foo",
|
||||||
|
"bar.",
|
||||||
|
"foo_bar",
|
||||||
|
strings.Repeat("x", 64),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range invalidDnsNames {
|
||||||
|
if err := validateS3BucketName(v, "us-west-2"); err == nil {
|
||||||
|
t.Fatalf("%q should not be a valid S3 bucket name", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
validEastNames := []string{
|
||||||
|
"foobar",
|
||||||
|
"foo_bar",
|
||||||
|
"127.0.0.1",
|
||||||
|
"foo..bar",
|
||||||
|
"foo_bar_baz",
|
||||||
|
"foo.bar.baz",
|
||||||
|
"Foo.Bar",
|
||||||
|
strings.Repeat("x", 255),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range validEastNames {
|
||||||
|
if err := validateS3BucketName(v, "us-east-1"); err != nil {
|
||||||
|
t.Fatalf("%q should be a valid S3 bucket name", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
invalidEastNames := []string{
|
||||||
|
"foo;bar",
|
||||||
|
strings.Repeat("x", 256),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range invalidEastNames {
|
||||||
|
if err := validateS3BucketName(v, "us-east-1"); err == nil {
|
||||||
|
t.Fatalf("%q should not be a valid S3 bucket name", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckAWSS3BucketDestroy(s *terraform.State) error {
|
func testAccCheckAWSS3BucketDestroy(s *terraform.State) error {
|
||||||
return testAccCheckInstanceDestroyWithProvider(s, testAccProvider)
|
return testAccCheckInstanceDestroyWithProvider(s, testAccProvider)
|
||||||
}
|
}
|
||||||
|
|
|
@ -360,7 +360,10 @@ func expandElastiCacheParameters(configured []interface{}) ([]*elasticache.Param
|
||||||
func flattenAccessLog(l *elb.AccessLog) []map[string]interface{} {
|
func flattenAccessLog(l *elb.AccessLog) []map[string]interface{} {
|
||||||
result := make([]map[string]interface{}, 0, 1)
|
result := make([]map[string]interface{}, 0, 1)
|
||||||
|
|
||||||
if l != nil && *l.Enabled {
|
if l == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
r := make(map[string]interface{})
|
r := make(map[string]interface{})
|
||||||
if l.S3BucketName != nil {
|
if l.S3BucketName != nil {
|
||||||
r["bucket"] = *l.S3BucketName
|
r["bucket"] = *l.S3BucketName
|
||||||
|
@ -379,7 +382,6 @@ func flattenAccessLog(l *elb.AccessLog) []map[string]interface{} {
|
||||||
}
|
}
|
||||||
|
|
||||||
result = append(result, r)
|
result = append(result, r)
|
||||||
}
|
|
||||||
|
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
|
@ -624,3 +624,52 @@ func validateSecurityRuleType(v interface{}, k string) (ws []string, errors []er
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func validateOnceAWeekWindowFormat(v interface{}, k string) (ws []string, errors []error) {
|
||||||
|
// valid time format is "ddd:hh24:mi"
|
||||||
|
validTimeFormat := "(sun|mon|tue|wed|thu|fri|sat):([0-1][0-9]|2[0-3]):([0-5][0-9])"
|
||||||
|
|
||||||
|
value := strings.ToLower(v.(string))
|
||||||
|
if !regexp.MustCompile(validTimeFormat + "-" + validTimeFormat).MatchString(value) {
|
||||||
|
errors = append(errors, fmt.Errorf(
|
||||||
|
"%q must satisfy the format of \"ddd:hh24:mi-ddd:hh24:mi\".", k))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateOnceADayWindowFormat(v interface{}, k string) (ws []string, errors []error) {
|
||||||
|
// valid time format is "hh24:mi"
|
||||||
|
validTimeFormat := "([0-1][0-9]|2[0-3]):([0-5][0-9])"
|
||||||
|
|
||||||
|
value := v.(string)
|
||||||
|
if !regexp.MustCompile(validTimeFormat + "-" + validTimeFormat).MatchString(value) {
|
||||||
|
errors = append(errors, fmt.Errorf(
|
||||||
|
"%q must satisfy the format of \"hh24:mi-hh24:mi\".", k))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateRoute53RecordType(v interface{}, k string) (ws []string, errors []error) {
|
||||||
|
// Valid Record types
|
||||||
|
// SOA, A, TXT, NS, CNAME, MX, NAPTR, PTR, SRV, SPF, AAAA
|
||||||
|
validTypes := map[string]struct{}{
|
||||||
|
"SOA": {},
|
||||||
|
"A": {},
|
||||||
|
"TXT": {},
|
||||||
|
"NS": {},
|
||||||
|
"CNAME": {},
|
||||||
|
"MX": {},
|
||||||
|
"NAPTR": {},
|
||||||
|
"PTR": {},
|
||||||
|
"SRV": {},
|
||||||
|
"SPF": {},
|
||||||
|
"AAAA": {},
|
||||||
|
}
|
||||||
|
|
||||||
|
value := v.(string)
|
||||||
|
if _, ok := validTypes[value]; !ok {
|
||||||
|
errors = append(errors, fmt.Errorf(
|
||||||
|
"%q must be one of [SOA, A, TXT, NS, CNAME, MX, NAPTR, PTR, SRV, SPF, AAAA]", k))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
|
@ -923,3 +923,123 @@ func TestValidateSecurityRuleType(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestValidateOnceAWeekWindowFormat(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
Value string
|
||||||
|
ErrCount int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
// once a day window format
|
||||||
|
Value: "04:00-05:00",
|
||||||
|
ErrCount: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// invalid day of week
|
||||||
|
Value: "san:04:00-san:05:00",
|
||||||
|
ErrCount: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// invalid hour
|
||||||
|
Value: "sun:24:00-san:25:00",
|
||||||
|
ErrCount: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// invalid min
|
||||||
|
Value: "sun:04:00-sun:04:60",
|
||||||
|
ErrCount: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// valid format
|
||||||
|
Value: "sun:04:00-sun:05:00",
|
||||||
|
ErrCount: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// "Sun" can also be used
|
||||||
|
Value: "Sun:04:00-Sun:05:00",
|
||||||
|
ErrCount: 0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range cases {
|
||||||
|
_, errors := validateOnceAWeekWindowFormat(tc.Value, "maintenance_window")
|
||||||
|
|
||||||
|
if len(errors) != tc.ErrCount {
|
||||||
|
t.Fatalf("Expected %d validation errors, But got %d errors for \"%s\"", tc.ErrCount, len(errors), tc.Value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestValidateOnceADayWindowFormat(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
Value string
|
||||||
|
ErrCount int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
// once a week window format
|
||||||
|
Value: "sun:04:00-sun:05:00",
|
||||||
|
ErrCount: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// invalid hour
|
||||||
|
Value: "24:00-25:00",
|
||||||
|
ErrCount: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// invalid min
|
||||||
|
Value: "04:00-04:60",
|
||||||
|
ErrCount: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// valid format
|
||||||
|
Value: "04:00-05:00",
|
||||||
|
ErrCount: 0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range cases {
|
||||||
|
_, errors := validateOnceADayWindowFormat(tc.Value, "backup_window")
|
||||||
|
|
||||||
|
if len(errors) != tc.ErrCount {
|
||||||
|
t.Fatalf("Expected %d validation errors, But got %d errors for \"%s\"", tc.ErrCount, len(errors), tc.Value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestValidateRoute53RecordType(t *testing.T) {
|
||||||
|
validTypes := []string{
|
||||||
|
"AAAA",
|
||||||
|
"SOA",
|
||||||
|
"A",
|
||||||
|
"TXT",
|
||||||
|
"CNAME",
|
||||||
|
"MX",
|
||||||
|
"NAPTR",
|
||||||
|
"PTR",
|
||||||
|
"SPF",
|
||||||
|
"SRV",
|
||||||
|
"NS",
|
||||||
|
}
|
||||||
|
|
||||||
|
invalidTypes := []string{
|
||||||
|
"a",
|
||||||
|
"alias",
|
||||||
|
"SpF",
|
||||||
|
"Txt",
|
||||||
|
"AaAA",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range validTypes {
|
||||||
|
_, errors := validateRoute53RecordType(v, "route53_record")
|
||||||
|
if len(errors) != 0 {
|
||||||
|
t.Fatalf("%q should be a valid Route53 record type: %v", v, errors)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range invalidTypes {
|
||||||
|
_, errors := validateRoute53RecordType(v, "route53_record")
|
||||||
|
if len(errors) == 0 {
|
||||||
|
t.Fatalf("%q should not be a valid Route53 record type", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -138,18 +138,31 @@ func (c *Config) getArmClient() (*ArmClient, error) {
|
||||||
subscriptionId: c.SubscriptionID,
|
subscriptionId: c.SubscriptionID,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// detect cloud from environment
|
||||||
|
env, envErr := azure.EnvironmentFromName(c.Environment)
|
||||||
|
if envErr != nil {
|
||||||
|
// try again with wrapped value to support readable values like german instead of AZUREGERMANCLOUD
|
||||||
|
wrapped := fmt.Sprintf("AZURE%sCLOUD", c.Environment)
|
||||||
|
var innerErr error
|
||||||
|
if env, innerErr = azure.EnvironmentFromName(wrapped); innerErr != nil {
|
||||||
|
return nil, envErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
rivieraClient, err := riviera.NewClient(&riviera.AzureResourceManagerCredentials{
|
rivieraClient, err := riviera.NewClient(&riviera.AzureResourceManagerCredentials{
|
||||||
ClientID: c.ClientID,
|
ClientID: c.ClientID,
|
||||||
ClientSecret: c.ClientSecret,
|
ClientSecret: c.ClientSecret,
|
||||||
TenantID: c.TenantID,
|
TenantID: c.TenantID,
|
||||||
SubscriptionID: c.SubscriptionID,
|
SubscriptionID: c.SubscriptionID,
|
||||||
|
ResourceManagerEndpoint: env.ResourceManagerEndpoint,
|
||||||
|
ActiveDirectoryEndpoint: env.ActiveDirectoryEndpoint,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("Error creating Riviera client: %s", err)
|
return nil, fmt.Errorf("Error creating Riviera client: %s", err)
|
||||||
}
|
}
|
||||||
client.rivieraClient = rivieraClient
|
client.rivieraClient = rivieraClient
|
||||||
|
|
||||||
oauthConfig, err := azure.PublicCloud.OAuthConfigForTenant(c.TenantID)
|
oauthConfig, err := env.OAuthConfigForTenant(c.TenantID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -159,267 +172,268 @@ func (c *Config) getArmClient() (*ArmClient, error) {
|
||||||
return nil, fmt.Errorf("Unable to configure OAuthConfig for tenant %s", c.TenantID)
|
return nil, fmt.Errorf("Unable to configure OAuthConfig for tenant %s", c.TenantID)
|
||||||
}
|
}
|
||||||
|
|
||||||
spt, err := azure.NewServicePrincipalToken(*oauthConfig, c.ClientID, c.ClientSecret,
|
spt, err := azure.NewServicePrincipalToken(*oauthConfig, c.ClientID, c.ClientSecret, env.ResourceManagerEndpoint)
|
||||||
azure.PublicCloud.ResourceManagerEndpoint)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
endpoint := env.ResourceManagerEndpoint
|
||||||
|
|
||||||
// NOTE: these declarations should be left separate for clarity should the
|
// NOTE: these declarations should be left separate for clarity should the
|
||||||
// clients be wished to be configured with custom Responders/PollingModess etc...
|
// clients be wished to be configured with custom Responders/PollingModess etc...
|
||||||
asc := compute.NewAvailabilitySetsClient(c.SubscriptionID)
|
asc := compute.NewAvailabilitySetsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&asc.Client)
|
setUserAgent(&asc.Client)
|
||||||
asc.Authorizer = spt
|
asc.Authorizer = spt
|
||||||
asc.Sender = autorest.CreateSender(withRequestLogging())
|
asc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.availSetClient = asc
|
client.availSetClient = asc
|
||||||
|
|
||||||
uoc := compute.NewUsageOperationsClient(c.SubscriptionID)
|
uoc := compute.NewUsageOperationsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&uoc.Client)
|
setUserAgent(&uoc.Client)
|
||||||
uoc.Authorizer = spt
|
uoc.Authorizer = spt
|
||||||
uoc.Sender = autorest.CreateSender(withRequestLogging())
|
uoc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.usageOpsClient = uoc
|
client.usageOpsClient = uoc
|
||||||
|
|
||||||
vmeic := compute.NewVirtualMachineExtensionImagesClient(c.SubscriptionID)
|
vmeic := compute.NewVirtualMachineExtensionImagesClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&vmeic.Client)
|
setUserAgent(&vmeic.Client)
|
||||||
vmeic.Authorizer = spt
|
vmeic.Authorizer = spt
|
||||||
vmeic.Sender = autorest.CreateSender(withRequestLogging())
|
vmeic.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.vmExtensionImageClient = vmeic
|
client.vmExtensionImageClient = vmeic
|
||||||
|
|
||||||
vmec := compute.NewVirtualMachineExtensionsClient(c.SubscriptionID)
|
vmec := compute.NewVirtualMachineExtensionsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&vmec.Client)
|
setUserAgent(&vmec.Client)
|
||||||
vmec.Authorizer = spt
|
vmec.Authorizer = spt
|
||||||
vmec.Sender = autorest.CreateSender(withRequestLogging())
|
vmec.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.vmExtensionClient = vmec
|
client.vmExtensionClient = vmec
|
||||||
|
|
||||||
vmic := compute.NewVirtualMachineImagesClient(c.SubscriptionID)
|
vmic := compute.NewVirtualMachineImagesClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&vmic.Client)
|
setUserAgent(&vmic.Client)
|
||||||
vmic.Authorizer = spt
|
vmic.Authorizer = spt
|
||||||
vmic.Sender = autorest.CreateSender(withRequestLogging())
|
vmic.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.vmImageClient = vmic
|
client.vmImageClient = vmic
|
||||||
|
|
||||||
vmssc := compute.NewVirtualMachineScaleSetsClient(c.SubscriptionID)
|
vmssc := compute.NewVirtualMachineScaleSetsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&vmssc.Client)
|
setUserAgent(&vmssc.Client)
|
||||||
vmssc.Authorizer = spt
|
vmssc.Authorizer = spt
|
||||||
vmssc.Sender = autorest.CreateSender(withRequestLogging())
|
vmssc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.vmScaleSetClient = vmssc
|
client.vmScaleSetClient = vmssc
|
||||||
|
|
||||||
vmc := compute.NewVirtualMachinesClient(c.SubscriptionID)
|
vmc := compute.NewVirtualMachinesClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&vmc.Client)
|
setUserAgent(&vmc.Client)
|
||||||
vmc.Authorizer = spt
|
vmc.Authorizer = spt
|
||||||
vmc.Sender = autorest.CreateSender(withRequestLogging())
|
vmc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.vmClient = vmc
|
client.vmClient = vmc
|
||||||
|
|
||||||
agc := network.NewApplicationGatewaysClient(c.SubscriptionID)
|
agc := network.NewApplicationGatewaysClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&agc.Client)
|
setUserAgent(&agc.Client)
|
||||||
agc.Authorizer = spt
|
agc.Authorizer = spt
|
||||||
agc.Sender = autorest.CreateSender(withRequestLogging())
|
agc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.appGatewayClient = agc
|
client.appGatewayClient = agc
|
||||||
|
|
||||||
crc := containerregistry.NewRegistriesClient(c.SubscriptionID)
|
crc := containerregistry.NewRegistriesClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&crc.Client)
|
setUserAgent(&crc.Client)
|
||||||
crc.Authorizer = spt
|
crc.Authorizer = spt
|
||||||
crc.Sender = autorest.CreateSender(withRequestLogging())
|
crc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.containerRegistryClient = crc
|
client.containerRegistryClient = crc
|
||||||
|
|
||||||
ehc := eventhub.NewEventHubsClient(c.SubscriptionID)
|
ehc := eventhub.NewEventHubsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&ehc.Client)
|
setUserAgent(&ehc.Client)
|
||||||
ehc.Authorizer = spt
|
ehc.Authorizer = spt
|
||||||
ehc.Sender = autorest.CreateSender(withRequestLogging())
|
ehc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.eventHubClient = ehc
|
client.eventHubClient = ehc
|
||||||
|
|
||||||
chcgc := eventhub.NewConsumerGroupsClient(c.SubscriptionID)
|
chcgc := eventhub.NewConsumerGroupsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&chcgc.Client)
|
setUserAgent(&chcgc.Client)
|
||||||
chcgc.Authorizer = spt
|
chcgc.Authorizer = spt
|
||||||
chcgc.Sender = autorest.CreateSender(withRequestLogging())
|
chcgc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.eventHubConsumerGroupClient = chcgc
|
client.eventHubConsumerGroupClient = chcgc
|
||||||
|
|
||||||
ehnc := eventhub.NewNamespacesClient(c.SubscriptionID)
|
ehnc := eventhub.NewNamespacesClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&ehnc.Client)
|
setUserAgent(&ehnc.Client)
|
||||||
ehnc.Authorizer = spt
|
ehnc.Authorizer = spt
|
||||||
ehnc.Sender = autorest.CreateSender(withRequestLogging())
|
ehnc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.eventHubNamespacesClient = ehnc
|
client.eventHubNamespacesClient = ehnc
|
||||||
|
|
||||||
ifc := network.NewInterfacesClient(c.SubscriptionID)
|
ifc := network.NewInterfacesClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&ifc.Client)
|
setUserAgent(&ifc.Client)
|
||||||
ifc.Authorizer = spt
|
ifc.Authorizer = spt
|
||||||
ifc.Sender = autorest.CreateSender(withRequestLogging())
|
ifc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.ifaceClient = ifc
|
client.ifaceClient = ifc
|
||||||
|
|
||||||
lbc := network.NewLoadBalancersClient(c.SubscriptionID)
|
lbc := network.NewLoadBalancersClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&lbc.Client)
|
setUserAgent(&lbc.Client)
|
||||||
lbc.Authorizer = spt
|
lbc.Authorizer = spt
|
||||||
lbc.Sender = autorest.CreateSender(withRequestLogging())
|
lbc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.loadBalancerClient = lbc
|
client.loadBalancerClient = lbc
|
||||||
|
|
||||||
lgc := network.NewLocalNetworkGatewaysClient(c.SubscriptionID)
|
lgc := network.NewLocalNetworkGatewaysClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&lgc.Client)
|
setUserAgent(&lgc.Client)
|
||||||
lgc.Authorizer = spt
|
lgc.Authorizer = spt
|
||||||
lgc.Sender = autorest.CreateSender(withRequestLogging())
|
lgc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.localNetConnClient = lgc
|
client.localNetConnClient = lgc
|
||||||
|
|
||||||
pipc := network.NewPublicIPAddressesClient(c.SubscriptionID)
|
pipc := network.NewPublicIPAddressesClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&pipc.Client)
|
setUserAgent(&pipc.Client)
|
||||||
pipc.Authorizer = spt
|
pipc.Authorizer = spt
|
||||||
pipc.Sender = autorest.CreateSender(withRequestLogging())
|
pipc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.publicIPClient = pipc
|
client.publicIPClient = pipc
|
||||||
|
|
||||||
sgc := network.NewSecurityGroupsClient(c.SubscriptionID)
|
sgc := network.NewSecurityGroupsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&sgc.Client)
|
setUserAgent(&sgc.Client)
|
||||||
sgc.Authorizer = spt
|
sgc.Authorizer = spt
|
||||||
sgc.Sender = autorest.CreateSender(withRequestLogging())
|
sgc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.secGroupClient = sgc
|
client.secGroupClient = sgc
|
||||||
|
|
||||||
src := network.NewSecurityRulesClient(c.SubscriptionID)
|
src := network.NewSecurityRulesClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&src.Client)
|
setUserAgent(&src.Client)
|
||||||
src.Authorizer = spt
|
src.Authorizer = spt
|
||||||
src.Sender = autorest.CreateSender(withRequestLogging())
|
src.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.secRuleClient = src
|
client.secRuleClient = src
|
||||||
|
|
||||||
snc := network.NewSubnetsClient(c.SubscriptionID)
|
snc := network.NewSubnetsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&snc.Client)
|
setUserAgent(&snc.Client)
|
||||||
snc.Authorizer = spt
|
snc.Authorizer = spt
|
||||||
snc.Sender = autorest.CreateSender(withRequestLogging())
|
snc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.subnetClient = snc
|
client.subnetClient = snc
|
||||||
|
|
||||||
vgcc := network.NewVirtualNetworkGatewayConnectionsClient(c.SubscriptionID)
|
vgcc := network.NewVirtualNetworkGatewayConnectionsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&vgcc.Client)
|
setUserAgent(&vgcc.Client)
|
||||||
vgcc.Authorizer = spt
|
vgcc.Authorizer = spt
|
||||||
vgcc.Sender = autorest.CreateSender(withRequestLogging())
|
vgcc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.vnetGatewayConnectionsClient = vgcc
|
client.vnetGatewayConnectionsClient = vgcc
|
||||||
|
|
||||||
vgc := network.NewVirtualNetworkGatewaysClient(c.SubscriptionID)
|
vgc := network.NewVirtualNetworkGatewaysClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&vgc.Client)
|
setUserAgent(&vgc.Client)
|
||||||
vgc.Authorizer = spt
|
vgc.Authorizer = spt
|
||||||
vgc.Sender = autorest.CreateSender(withRequestLogging())
|
vgc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.vnetGatewayClient = vgc
|
client.vnetGatewayClient = vgc
|
||||||
|
|
||||||
vnc := network.NewVirtualNetworksClient(c.SubscriptionID)
|
vnc := network.NewVirtualNetworksClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&vnc.Client)
|
setUserAgent(&vnc.Client)
|
||||||
vnc.Authorizer = spt
|
vnc.Authorizer = spt
|
||||||
vnc.Sender = autorest.CreateSender(withRequestLogging())
|
vnc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.vnetClient = vnc
|
client.vnetClient = vnc
|
||||||
|
|
||||||
vnpc := network.NewVirtualNetworkPeeringsClient(c.SubscriptionID)
|
vnpc := network.NewVirtualNetworkPeeringsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&vnpc.Client)
|
setUserAgent(&vnpc.Client)
|
||||||
vnpc.Authorizer = spt
|
vnpc.Authorizer = spt
|
||||||
vnpc.Sender = autorest.CreateSender(withRequestLogging())
|
vnpc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.vnetPeeringsClient = vnpc
|
client.vnetPeeringsClient = vnpc
|
||||||
|
|
||||||
rtc := network.NewRouteTablesClient(c.SubscriptionID)
|
rtc := network.NewRouteTablesClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&rtc.Client)
|
setUserAgent(&rtc.Client)
|
||||||
rtc.Authorizer = spt
|
rtc.Authorizer = spt
|
||||||
rtc.Sender = autorest.CreateSender(withRequestLogging())
|
rtc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.routeTablesClient = rtc
|
client.routeTablesClient = rtc
|
||||||
|
|
||||||
rc := network.NewRoutesClient(c.SubscriptionID)
|
rc := network.NewRoutesClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&rc.Client)
|
setUserAgent(&rc.Client)
|
||||||
rc.Authorizer = spt
|
rc.Authorizer = spt
|
||||||
rc.Sender = autorest.CreateSender(withRequestLogging())
|
rc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.routesClient = rc
|
client.routesClient = rc
|
||||||
|
|
||||||
rgc := resources.NewGroupsClient(c.SubscriptionID)
|
rgc := resources.NewGroupsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&rgc.Client)
|
setUserAgent(&rgc.Client)
|
||||||
rgc.Authorizer = spt
|
rgc.Authorizer = spt
|
||||||
rgc.Sender = autorest.CreateSender(withRequestLogging())
|
rgc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.resourceGroupClient = rgc
|
client.resourceGroupClient = rgc
|
||||||
|
|
||||||
pc := resources.NewProvidersClient(c.SubscriptionID)
|
pc := resources.NewProvidersClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&pc.Client)
|
setUserAgent(&pc.Client)
|
||||||
pc.Authorizer = spt
|
pc.Authorizer = spt
|
||||||
pc.Sender = autorest.CreateSender(withRequestLogging())
|
pc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.providers = pc
|
client.providers = pc
|
||||||
|
|
||||||
tc := resources.NewTagsClient(c.SubscriptionID)
|
tc := resources.NewTagsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&tc.Client)
|
setUserAgent(&tc.Client)
|
||||||
tc.Authorizer = spt
|
tc.Authorizer = spt
|
||||||
tc.Sender = autorest.CreateSender(withRequestLogging())
|
tc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.tagsClient = tc
|
client.tagsClient = tc
|
||||||
|
|
||||||
rf := resources.NewClient(c.SubscriptionID)
|
rf := resources.NewClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&rf.Client)
|
setUserAgent(&rf.Client)
|
||||||
rf.Authorizer = spt
|
rf.Authorizer = spt
|
||||||
rf.Sender = autorest.CreateSender(withRequestLogging())
|
rf.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.resourceFindClient = rf
|
client.resourceFindClient = rf
|
||||||
|
|
||||||
jc := scheduler.NewJobsClient(c.SubscriptionID)
|
jc := scheduler.NewJobsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&jc.Client)
|
setUserAgent(&jc.Client)
|
||||||
jc.Authorizer = spt
|
jc.Authorizer = spt
|
||||||
jc.Sender = autorest.CreateSender(withRequestLogging())
|
jc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.jobsClient = jc
|
client.jobsClient = jc
|
||||||
|
|
||||||
jcc := scheduler.NewJobCollectionsClient(c.SubscriptionID)
|
jcc := scheduler.NewJobCollectionsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&jcc.Client)
|
setUserAgent(&jcc.Client)
|
||||||
jcc.Authorizer = spt
|
jcc.Authorizer = spt
|
||||||
jcc.Sender = autorest.CreateSender(withRequestLogging())
|
jcc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.jobsCollectionsClient = jcc
|
client.jobsCollectionsClient = jcc
|
||||||
|
|
||||||
ssc := storage.NewAccountsClient(c.SubscriptionID)
|
ssc := storage.NewAccountsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&ssc.Client)
|
setUserAgent(&ssc.Client)
|
||||||
ssc.Authorizer = spt
|
ssc.Authorizer = spt
|
||||||
ssc.Sender = autorest.CreateSender(withRequestLogging())
|
ssc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.storageServiceClient = ssc
|
client.storageServiceClient = ssc
|
||||||
|
|
||||||
suc := storage.NewUsageOperationsClient(c.SubscriptionID)
|
suc := storage.NewUsageOperationsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&suc.Client)
|
setUserAgent(&suc.Client)
|
||||||
suc.Authorizer = spt
|
suc.Authorizer = spt
|
||||||
suc.Sender = autorest.CreateSender(withRequestLogging())
|
suc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.storageUsageClient = suc
|
client.storageUsageClient = suc
|
||||||
|
|
||||||
cpc := cdn.NewProfilesClient(c.SubscriptionID)
|
cpc := cdn.NewProfilesClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&cpc.Client)
|
setUserAgent(&cpc.Client)
|
||||||
cpc.Authorizer = spt
|
cpc.Authorizer = spt
|
||||||
cpc.Sender = autorest.CreateSender(withRequestLogging())
|
cpc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.cdnProfilesClient = cpc
|
client.cdnProfilesClient = cpc
|
||||||
|
|
||||||
cec := cdn.NewEndpointsClient(c.SubscriptionID)
|
cec := cdn.NewEndpointsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&cec.Client)
|
setUserAgent(&cec.Client)
|
||||||
cec.Authorizer = spt
|
cec.Authorizer = spt
|
||||||
cec.Sender = autorest.CreateSender(withRequestLogging())
|
cec.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.cdnEndpointsClient = cec
|
client.cdnEndpointsClient = cec
|
||||||
|
|
||||||
dc := resources.NewDeploymentsClient(c.SubscriptionID)
|
dc := resources.NewDeploymentsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&dc.Client)
|
setUserAgent(&dc.Client)
|
||||||
dc.Authorizer = spt
|
dc.Authorizer = spt
|
||||||
dc.Sender = autorest.CreateSender(withRequestLogging())
|
dc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.deploymentsClient = dc
|
client.deploymentsClient = dc
|
||||||
|
|
||||||
tmpc := trafficmanager.NewProfilesClient(c.SubscriptionID)
|
tmpc := trafficmanager.NewProfilesClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&tmpc.Client)
|
setUserAgent(&tmpc.Client)
|
||||||
tmpc.Authorizer = spt
|
tmpc.Authorizer = spt
|
||||||
tmpc.Sender = autorest.CreateSender(withRequestLogging())
|
tmpc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.trafficManagerProfilesClient = tmpc
|
client.trafficManagerProfilesClient = tmpc
|
||||||
|
|
||||||
tmec := trafficmanager.NewEndpointsClient(c.SubscriptionID)
|
tmec := trafficmanager.NewEndpointsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&tmec.Client)
|
setUserAgent(&tmec.Client)
|
||||||
tmec.Authorizer = spt
|
tmec.Authorizer = spt
|
||||||
tmec.Sender = autorest.CreateSender(withRequestLogging())
|
tmec.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.trafficManagerEndpointsClient = tmec
|
client.trafficManagerEndpointsClient = tmec
|
||||||
|
|
||||||
rdc := redis.NewClient(c.SubscriptionID)
|
rdc := redis.NewClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&rdc.Client)
|
setUserAgent(&rdc.Client)
|
||||||
rdc.Authorizer = spt
|
rdc.Authorizer = spt
|
||||||
rdc.Sender = autorest.CreateSender(withRequestLogging())
|
rdc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.redisClient = rdc
|
client.redisClient = rdc
|
||||||
|
|
||||||
sbnc := servicebus.NewNamespacesClient(c.SubscriptionID)
|
sbnc := servicebus.NewNamespacesClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&sbnc.Client)
|
setUserAgent(&sbnc.Client)
|
||||||
sbnc.Authorizer = spt
|
sbnc.Authorizer = spt
|
||||||
sbnc.Sender = autorest.CreateSender(withRequestLogging())
|
sbnc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.serviceBusNamespacesClient = sbnc
|
client.serviceBusNamespacesClient = sbnc
|
||||||
|
|
||||||
sbtc := servicebus.NewTopicsClient(c.SubscriptionID)
|
sbtc := servicebus.NewTopicsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&sbtc.Client)
|
setUserAgent(&sbtc.Client)
|
||||||
sbtc.Authorizer = spt
|
sbtc.Authorizer = spt
|
||||||
sbtc.Sender = autorest.CreateSender(withRequestLogging())
|
sbtc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.serviceBusTopicsClient = sbtc
|
client.serviceBusTopicsClient = sbtc
|
||||||
|
|
||||||
sbsc := servicebus.NewSubscriptionsClient(c.SubscriptionID)
|
sbsc := servicebus.NewSubscriptionsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&sbsc.Client)
|
setUserAgent(&sbsc.Client)
|
||||||
sbsc.Authorizer = spt
|
sbsc.Authorizer = spt
|
||||||
sbsc.Sender = autorest.CreateSender(withRequestLogging())
|
sbsc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
client.serviceBusSubscriptionsClient = sbsc
|
client.serviceBusSubscriptionsClient = sbsc
|
||||||
|
|
||||||
kvc := keyvault.NewVaultsClient(c.SubscriptionID)
|
kvc := keyvault.NewVaultsClientWithBaseURI(endpoint, c.SubscriptionID)
|
||||||
setUserAgent(&kvc.Client)
|
setUserAgent(&kvc.Client)
|
||||||
kvc.Authorizer = spt
|
kvc.Authorizer = spt
|
||||||
kvc.Sender = autorest.CreateSender(withRequestLogging())
|
kvc.Sender = autorest.CreateSender(withRequestLogging())
|
||||||
|
|
|
@ -45,6 +45,12 @@ func Provider() terraform.ResourceProvider {
|
||||||
DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""),
|
DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""),
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"environment": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
DefaultFunc: schema.EnvDefaultFunc("ARM_ENVIRONMENT", "public"),
|
||||||
|
},
|
||||||
|
|
||||||
"skip_provider_registration": {
|
"skip_provider_registration": {
|
||||||
Type: schema.TypeBool,
|
Type: schema.TypeBool,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
|
@ -134,6 +140,7 @@ type Config struct {
|
||||||
ClientID string
|
ClientID string
|
||||||
ClientSecret string
|
ClientSecret string
|
||||||
TenantID string
|
TenantID string
|
||||||
|
Environment string
|
||||||
SkipProviderRegistration bool
|
SkipProviderRegistration bool
|
||||||
|
|
||||||
validateCredentialsOnce sync.Once
|
validateCredentialsOnce sync.Once
|
||||||
|
@ -154,6 +161,9 @@ func (c *Config) validate() error {
|
||||||
if c.TenantID == "" {
|
if c.TenantID == "" {
|
||||||
err = multierror.Append(err, fmt.Errorf("Tenant ID must be configured for the AzureRM provider"))
|
err = multierror.Append(err, fmt.Errorf("Tenant ID must be configured for the AzureRM provider"))
|
||||||
}
|
}
|
||||||
|
if c.Environment == "" {
|
||||||
|
err = multierror.Append(err, fmt.Errorf("Environment must be configured for the AzureRM provider"))
|
||||||
|
}
|
||||||
|
|
||||||
return err.ErrorOrNil()
|
return err.ErrorOrNil()
|
||||||
}
|
}
|
||||||
|
@ -165,6 +175,7 @@ func providerConfigure(p *schema.Provider) schema.ConfigureFunc {
|
||||||
ClientID: d.Get("client_id").(string),
|
ClientID: d.Get("client_id").(string),
|
||||||
ClientSecret: d.Get("client_secret").(string),
|
ClientSecret: d.Get("client_secret").(string),
|
||||||
TenantID: d.Get("tenant_id").(string),
|
TenantID: d.Get("tenant_id").(string),
|
||||||
|
Environment: d.Get("environment").(string),
|
||||||
SkipProviderRegistration: d.Get("skip_provider_registration").(bool),
|
SkipProviderRegistration: d.Get("skip_provider_registration").(bool),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -78,7 +78,7 @@ func resourceArmCdnProfileCreate(d *schema.ResourceData, meta interface{}) error
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if read.ID == nil {
|
if read.ID == nil {
|
||||||
return fmt.Errorf("Cannot read CND Profile %s (resource group %s) ID", name, resGroup)
|
return fmt.Errorf("Cannot read CDN Profile %s (resource group %s) ID", name, resGroup)
|
||||||
}
|
}
|
||||||
|
|
||||||
d.SetId(*read.ID)
|
d.SetId(*read.ID)
|
||||||
|
|
|
@ -74,12 +74,15 @@ func resourceArmLoadBalancerBackendAddressPoolCreate(d *schema.ResourceData, met
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
_, _, exists = findLoadBalancerBackEndAddressPoolByName(loadBalancer, d.Get("name").(string))
|
backendAddressPools := append(*loadBalancer.LoadBalancerPropertiesFormat.BackendAddressPools, expandAzureRmLoadBalancerBackendAddressPools(d))
|
||||||
|
existingPool, existingPoolIndex, exists := findLoadBalancerBackEndAddressPoolByName(loadBalancer, d.Get("name").(string))
|
||||||
if exists {
|
if exists {
|
||||||
return fmt.Errorf("A BackEnd Address Pool with name %q already exists.", d.Get("name").(string))
|
if d.Get("name").(string) == *existingPool.Name {
|
||||||
|
// this pool is being updated/reapplied remove old copy from the slice
|
||||||
|
backendAddressPools = append(backendAddressPools[:existingPoolIndex], backendAddressPools[existingPoolIndex+1:]...)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
backendAddressPools := append(*loadBalancer.LoadBalancerPropertiesFormat.BackendAddressPools, expandAzureRmLoadBalancerBackendAddressPools(d))
|
|
||||||
loadBalancer.LoadBalancerPropertiesFormat.BackendAddressPools = &backendAddressPools
|
loadBalancer.LoadBalancerPropertiesFormat.BackendAddressPools = &backendAddressPools
|
||||||
resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string))
|
resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -60,6 +60,40 @@ func TestAccAzureRMLoadBalancerBackEndAddressPool_removal(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccAzureRMLoadBalancerBackEndAddressPool_reapply(t *testing.T) {
|
||||||
|
var lb network.LoadBalancer
|
||||||
|
ri := acctest.RandInt()
|
||||||
|
addressPoolName := fmt.Sprintf("%d-address-pool", ri)
|
||||||
|
|
||||||
|
deleteAddressPoolState := func(s *terraform.State) error {
|
||||||
|
return s.Remove("azurerm_lb_backend_address_pool.test")
|
||||||
|
}
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testCheckAzureRMLoadBalancerDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccAzureRMLoadBalancerBackEndAddressPool_basic(ri, addressPoolName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
|
||||||
|
testCheckAzureRMLoadBalancerBackEndAddressPoolExists(addressPoolName, &lb),
|
||||||
|
deleteAddressPoolState,
|
||||||
|
),
|
||||||
|
ExpectNonEmptyPlan: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Config: testAccAzureRMLoadBalancerBackEndAddressPool_basic(ri, addressPoolName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
|
||||||
|
testCheckAzureRMLoadBalancerBackEndAddressPoolExists(addressPoolName, &lb),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func testCheckAzureRMLoadBalancerBackEndAddressPoolExists(addressPoolName string, lb *network.LoadBalancer) resource.TestCheckFunc {
|
func testCheckAzureRMLoadBalancerBackEndAddressPoolExists(addressPoolName string, lb *network.LoadBalancer) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
_, _, exists := findLoadBalancerBackEndAddressPoolByName(lb, addressPoolName)
|
_, _, exists := findLoadBalancerBackEndAddressPoolByName(lb, addressPoolName)
|
||||||
|
|
|
@ -100,11 +100,9 @@ func resourceArmLoadBalancerNatPoolCreate(d *schema.ResourceData, meta interface
|
||||||
|
|
||||||
existingNatPool, existingNatPoolIndex, exists := findLoadBalancerNatPoolByName(loadBalancer, d.Get("name").(string))
|
existingNatPool, existingNatPoolIndex, exists := findLoadBalancerNatPoolByName(loadBalancer, d.Get("name").(string))
|
||||||
if exists {
|
if exists {
|
||||||
if d.Id() == *existingNatPool.ID {
|
if d.Get("name").(string) == *existingNatPool.Name {
|
||||||
// this probe is being updated remove old copy from the slice
|
// this probe is being updated/reapplied remove old copy from the slice
|
||||||
natPools = append(natPools[:existingNatPoolIndex], natPools[existingNatPoolIndex+1:]...)
|
natPools = append(natPools[:existingNatPoolIndex], natPools[existingNatPoolIndex+1:]...)
|
||||||
} else {
|
|
||||||
return fmt.Errorf("A NAT Pool with name %q already exists.", d.Get("name").(string))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -5,8 +5,6 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"regexp"
|
|
||||||
|
|
||||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
@ -102,23 +100,35 @@ func TestAccAzureRMLoadBalancerNatPool_update(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccAzureRMLoadBalancerNatPool_duplicate(t *testing.T) {
|
func TestAccAzureRMLoadBalancerNatPool_reapply(t *testing.T) {
|
||||||
var lb network.LoadBalancer
|
var lb network.LoadBalancer
|
||||||
ri := acctest.RandInt()
|
ri := acctest.RandInt()
|
||||||
natPoolName := fmt.Sprintf("NatPool-%d", ri)
|
natPoolName := fmt.Sprintf("NatPool-%d", ri)
|
||||||
|
|
||||||
|
deleteNatPoolState := func(s *terraform.State) error {
|
||||||
|
return s.Remove("azurerm_lb_nat_pool.test")
|
||||||
|
}
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testCheckAzureRMLoadBalancerDestroy,
|
CheckDestroy: testCheckAzureRMLoadBalancerDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
{
|
{
|
||||||
Config: testAccAzureRMLoadBalancerNatPool_multiplePools(ri, natPoolName, natPoolName),
|
Config: testAccAzureRMLoadBalancerNatPool_basic(ri, natPoolName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
|
||||||
|
testCheckAzureRMLoadBalancerNatPoolExists(natPoolName, &lb),
|
||||||
|
deleteNatPoolState,
|
||||||
|
),
|
||||||
|
ExpectNonEmptyPlan: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Config: testAccAzureRMLoadBalancerNatPool_basic(ri, natPoolName),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
|
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
|
||||||
testCheckAzureRMLoadBalancerNatPoolExists(natPoolName, &lb),
|
testCheckAzureRMLoadBalancerNatPoolExists(natPoolName, &lb),
|
||||||
),
|
),
|
||||||
ExpectError: regexp.MustCompile(fmt.Sprintf("A NAT Pool with name %q already exists.", natPoolName)),
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
|
@ -100,11 +100,9 @@ func resourceArmLoadBalancerNatRuleCreate(d *schema.ResourceData, meta interface
|
||||||
|
|
||||||
existingNatRule, existingNatRuleIndex, exists := findLoadBalancerNatRuleByName(loadBalancer, d.Get("name").(string))
|
existingNatRule, existingNatRuleIndex, exists := findLoadBalancerNatRuleByName(loadBalancer, d.Get("name").(string))
|
||||||
if exists {
|
if exists {
|
||||||
if d.Id() == *existingNatRule.ID {
|
if d.Get("name").(string) == *existingNatRule.Name {
|
||||||
// this probe is being updated remove old copy from the slice
|
// this probe is being updated/reapplied remove old copy from the slice
|
||||||
natRules = append(natRules[:existingNatRuleIndex], natRules[existingNatRuleIndex+1:]...)
|
natRules = append(natRules[:existingNatRuleIndex], natRules[existingNatRuleIndex+1:]...)
|
||||||
} else {
|
|
||||||
return fmt.Errorf("A NAT Rule with name %q already exists.", d.Get("name").(string))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -5,8 +5,6 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"regexp"
|
|
||||||
|
|
||||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
@ -104,23 +102,35 @@ func TestAccAzureRMLoadBalancerNatRule_update(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccAzureRMLoadBalancerNatRule_duplicate(t *testing.T) {
|
func TestAccAzureRMLoadBalancerNatRule_reapply(t *testing.T) {
|
||||||
var lb network.LoadBalancer
|
var lb network.LoadBalancer
|
||||||
ri := acctest.RandInt()
|
ri := acctest.RandInt()
|
||||||
natRuleName := fmt.Sprintf("NatRule-%d", ri)
|
natRuleName := fmt.Sprintf("NatRule-%d", ri)
|
||||||
|
|
||||||
|
deleteNatRuleState := func(s *terraform.State) error {
|
||||||
|
return s.Remove("azurerm_lb_nat_rule.test")
|
||||||
|
}
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testCheckAzureRMLoadBalancerDestroy,
|
CheckDestroy: testCheckAzureRMLoadBalancerDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
{
|
{
|
||||||
Config: testAccAzureRMLoadBalancerNatRule_multipleRules(ri, natRuleName, natRuleName),
|
Config: testAccAzureRMLoadBalancerNatRule_basic(ri, natRuleName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
|
||||||
|
testCheckAzureRMLoadBalancerNatRuleExists(natRuleName, &lb),
|
||||||
|
deleteNatRuleState,
|
||||||
|
),
|
||||||
|
ExpectNonEmptyPlan: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Config: testAccAzureRMLoadBalancerNatRule_basic(ri, natRuleName),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
|
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
|
||||||
testCheckAzureRMLoadBalancerNatRuleExists(natRuleName, &lb),
|
testCheckAzureRMLoadBalancerNatRuleExists(natRuleName, &lb),
|
||||||
),
|
),
|
||||||
ExpectError: regexp.MustCompile(fmt.Sprintf("A NAT Rule with name %q already exists.", natRuleName)),
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
|
@ -54,7 +54,6 @@ func resourceArmLoadBalancerProbe() *schema.Resource {
|
||||||
"request_path": {
|
"request_path": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
|
||||||
},
|
},
|
||||||
|
|
||||||
"interval_in_seconds": {
|
"interval_in_seconds": {
|
||||||
|
@ -106,11 +105,9 @@ func resourceArmLoadBalancerProbeCreate(d *schema.ResourceData, meta interface{}
|
||||||
|
|
||||||
existingProbe, existingProbeIndex, exists := findLoadBalancerProbeByName(loadBalancer, d.Get("name").(string))
|
existingProbe, existingProbeIndex, exists := findLoadBalancerProbeByName(loadBalancer, d.Get("name").(string))
|
||||||
if exists {
|
if exists {
|
||||||
if d.Id() == *existingProbe.ID {
|
if d.Get("name").(string) == *existingProbe.Name {
|
||||||
// this probe is being updated remove old copy from the slice
|
// this probe is being updated/reapplied remove old copy from the slice
|
||||||
probes = append(probes[:existingProbeIndex], probes[existingProbeIndex+1:]...)
|
probes = append(probes[:existingProbeIndex], probes[existingProbeIndex+1:]...)
|
||||||
} else {
|
|
||||||
return fmt.Errorf("A Probe with name %q already exists.", d.Get("name").(string))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -5,8 +5,6 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"regexp"
|
|
||||||
|
|
||||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
@ -102,7 +100,7 @@ func TestAccAzureRMLoadBalancerProbe_update(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccAzureRMLoadBalancerProbe_duplicate(t *testing.T) {
|
func TestAccAzureRMLoadBalancerProbe_updateProtocol(t *testing.T) {
|
||||||
var lb network.LoadBalancer
|
var lb network.LoadBalancer
|
||||||
ri := acctest.RandInt()
|
ri := acctest.RandInt()
|
||||||
probeName := fmt.Sprintf("probe-%d", ri)
|
probeName := fmt.Sprintf("probe-%d", ri)
|
||||||
|
@ -113,12 +111,54 @@ func TestAccAzureRMLoadBalancerProbe_duplicate(t *testing.T) {
|
||||||
CheckDestroy: testCheckAzureRMLoadBalancerDestroy,
|
CheckDestroy: testCheckAzureRMLoadBalancerDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
{
|
{
|
||||||
Config: testAccAzureRMLoadBalancerProbe_multipleProbes(ri, probeName, probeName),
|
Config: testAccAzureRMLoadBalancerProbe_updateProtocolBefore(ri, probeName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
|
||||||
|
testCheckAzureRMLoadBalancerProbeExists(probeName, &lb),
|
||||||
|
resource.TestCheckResourceAttr("azurerm_lb_probe.test", "protocol", "Http"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Config: testAccAzureRMLoadBalancerProbe_updateProtocolAfter(ri, probeName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
|
||||||
|
testCheckAzureRMLoadBalancerProbeExists(probeName, &lb),
|
||||||
|
resource.TestCheckResourceAttr("azurerm_lb_probe.test", "protocol", "Tcp"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccAzureRMLoadBalancerProbe_reapply(t *testing.T) {
|
||||||
|
var lb network.LoadBalancer
|
||||||
|
ri := acctest.RandInt()
|
||||||
|
probeName := fmt.Sprintf("probe-%d", ri)
|
||||||
|
|
||||||
|
deleteProbeState := func(s *terraform.State) error {
|
||||||
|
return s.Remove("azurerm_lb_probe.test")
|
||||||
|
}
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testCheckAzureRMLoadBalancerDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccAzureRMLoadBalancerProbe_basic(ri, probeName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
|
||||||
|
testCheckAzureRMLoadBalancerProbeExists(probeName, &lb),
|
||||||
|
deleteProbeState,
|
||||||
|
),
|
||||||
|
ExpectNonEmptyPlan: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Config: testAccAzureRMLoadBalancerProbe_basic(ri, probeName),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
|
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
|
||||||
testCheckAzureRMLoadBalancerProbeExists(probeName, &lb),
|
testCheckAzureRMLoadBalancerProbeExists(probeName, &lb),
|
||||||
),
|
),
|
||||||
ExpectError: regexp.MustCompile(fmt.Sprintf("A Probe with name %q already exists.", probeName)),
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
@ -293,3 +333,76 @@ resource "azurerm_lb_probe" "test2" {
|
||||||
}
|
}
|
||||||
`, rInt, rInt, rInt, rInt, probeName, probe2Name)
|
`, rInt, rInt, rInt, rInt, probeName, probe2Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccAzureRMLoadBalancerProbe_updateProtocolBefore(rInt int, probeName string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "azurerm_resource_group" "test" {
|
||||||
|
name = "acctestrg-%d"
|
||||||
|
location = "West US"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "azurerm_public_ip" "test" {
|
||||||
|
name = "test-ip-%d"
|
||||||
|
location = "West US"
|
||||||
|
resource_group_name = "${azurerm_resource_group.test.name}"
|
||||||
|
public_ip_address_allocation = "static"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "azurerm_lb" "test" {
|
||||||
|
name = "arm-test-loadbalancer-%d"
|
||||||
|
location = "West US"
|
||||||
|
resource_group_name = "${azurerm_resource_group.test.name}"
|
||||||
|
|
||||||
|
frontend_ip_configuration {
|
||||||
|
name = "one-%d"
|
||||||
|
public_ip_address_id = "${azurerm_public_ip.test.id}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "azurerm_lb_probe" "test" {
|
||||||
|
location = "West US"
|
||||||
|
resource_group_name = "${azurerm_resource_group.test.name}"
|
||||||
|
loadbalancer_id = "${azurerm_lb.test.id}"
|
||||||
|
name = "%s"
|
||||||
|
protocol = "Http"
|
||||||
|
request_path = "/"
|
||||||
|
port = 80
|
||||||
|
}
|
||||||
|
`, rInt, rInt, rInt, rInt, probeName)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccAzureRMLoadBalancerProbe_updateProtocolAfter(rInt int, probeName string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "azurerm_resource_group" "test" {
|
||||||
|
name = "acctestrg-%d"
|
||||||
|
location = "West US"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "azurerm_public_ip" "test" {
|
||||||
|
name = "test-ip-%d"
|
||||||
|
location = "West US"
|
||||||
|
resource_group_name = "${azurerm_resource_group.test.name}"
|
||||||
|
public_ip_address_allocation = "static"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "azurerm_lb" "test" {
|
||||||
|
name = "arm-test-loadbalancer-%d"
|
||||||
|
location = "West US"
|
||||||
|
resource_group_name = "${azurerm_resource_group.test.name}"
|
||||||
|
|
||||||
|
frontend_ip_configuration {
|
||||||
|
name = "one-%d"
|
||||||
|
public_ip_address_id = "${azurerm_public_ip.test.id}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "azurerm_lb_probe" "test" {
|
||||||
|
location = "West US"
|
||||||
|
resource_group_name = "${azurerm_resource_group.test.name}"
|
||||||
|
loadbalancer_id = "${azurerm_lb.test.id}"
|
||||||
|
name = "%s"
|
||||||
|
protocol = "Tcp"
|
||||||
|
port = 80
|
||||||
|
}
|
||||||
|
`, rInt, rInt, rInt, rInt, probeName)
|
||||||
|
}
|
||||||
|
|
|
@ -127,11 +127,9 @@ func resourceArmLoadBalancerRuleCreate(d *schema.ResourceData, meta interface{})
|
||||||
|
|
||||||
existingRule, existingRuleIndex, exists := findLoadBalancerRuleByName(loadBalancer, d.Get("name").(string))
|
existingRule, existingRuleIndex, exists := findLoadBalancerRuleByName(loadBalancer, d.Get("name").(string))
|
||||||
if exists {
|
if exists {
|
||||||
if d.Id() == *existingRule.ID {
|
if d.Get("name").(string) == *existingRule.Name {
|
||||||
// this rule is being updated remove old copy from the slice
|
// this rule is being updated/reapplied remove old copy from the slice
|
||||||
lbRules = append(lbRules[:existingRuleIndex], lbRules[existingRuleIndex+1:]...)
|
lbRules = append(lbRules[:existingRuleIndex], lbRules[existingRuleIndex+1:]...)
|
||||||
} else {
|
|
||||||
return fmt.Errorf("A LoadBalancer Rule with name %q already exists.", d.Get("name").(string))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -5,8 +5,6 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"regexp"
|
|
||||||
|
|
||||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
@ -199,15 +197,14 @@ func TestAccAzureRMLoadBalancerRule_update(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccAzureRMLoadBalancerRule_duplicateRules(t *testing.T) {
|
func TestAccAzureRMLoadBalancerRule_reapply(t *testing.T) {
|
||||||
var lb network.LoadBalancer
|
var lb network.LoadBalancer
|
||||||
ri := acctest.RandInt()
|
ri := acctest.RandInt()
|
||||||
lbRuleName := fmt.Sprintf("LbRule-%s", acctest.RandStringFromCharSet(8, acctest.CharSetAlpha))
|
lbRuleName := fmt.Sprintf("LbRule-%s", acctest.RandStringFromCharSet(8, acctest.CharSetAlpha))
|
||||||
|
|
||||||
subscriptionID := os.Getenv("ARM_SUBSCRIPTION_ID")
|
deleteRuleState := func(s *terraform.State) error {
|
||||||
lbRuleID := fmt.Sprintf(
|
return s.Remove("azurerm_lb_rule.test")
|
||||||
"/subscriptions/%s/resourceGroups/acctestrg-%d/providers/Microsoft.Network/loadBalancers/arm-test-loadbalancer-%d/loadBalancingRules/%s",
|
}
|
||||||
subscriptionID, ri, ri, lbRuleName)
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
@ -215,13 +212,20 @@ func TestAccAzureRMLoadBalancerRule_duplicateRules(t *testing.T) {
|
||||||
CheckDestroy: testCheckAzureRMLoadBalancerDestroy,
|
CheckDestroy: testCheckAzureRMLoadBalancerDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
{
|
{
|
||||||
Config: testAccAzureRMLoadBalancerRule_multipleRules(ri, lbRuleName, lbRuleName),
|
Config: testAccAzureRMLoadBalancerRule_basic(ri, lbRuleName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
|
||||||
|
testCheckAzureRMLoadBalancerRuleExists(lbRuleName, &lb),
|
||||||
|
deleteRuleState,
|
||||||
|
),
|
||||||
|
ExpectNonEmptyPlan: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Config: testAccAzureRMLoadBalancerRule_basic(ri, lbRuleName),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
|
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
|
||||||
testCheckAzureRMLoadBalancerRuleExists(lbRuleName, &lb),
|
testCheckAzureRMLoadBalancerRuleExists(lbRuleName, &lb),
|
||||||
resource.TestCheckResourceAttr("azurerm_lb_rule.test", "id", lbRuleID),
|
|
||||||
),
|
),
|
||||||
ExpectError: regexp.MustCompile(fmt.Sprintf("A LoadBalancer Rule with name %q already exists.", lbRuleName)),
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
|
@ -197,6 +197,12 @@ func resourceArmVirtualMachine() *schema.Resource {
|
||||||
Required: true,
|
Required: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"caching": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
"disk_size_gb": {
|
"disk_size_gb": {
|
||||||
Type: schema.TypeInt,
|
Type: schema.TypeInt,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
|
@ -864,6 +870,7 @@ func flattenAzureRmVirtualMachineDataDisk(disks *[]compute.DataDisk) interface{}
|
||||||
l["name"] = *disk.Name
|
l["name"] = *disk.Name
|
||||||
l["vhd_uri"] = *disk.Vhd.URI
|
l["vhd_uri"] = *disk.Vhd.URI
|
||||||
l["create_option"] = disk.CreateOption
|
l["create_option"] = disk.CreateOption
|
||||||
|
l["caching"] = string(disk.Caching)
|
||||||
if disk.DiskSizeGB != nil {
|
if disk.DiskSizeGB != nil {
|
||||||
l["disk_size_gb"] = *disk.DiskSizeGB
|
l["disk_size_gb"] = *disk.DiskSizeGB
|
||||||
}
|
}
|
||||||
|
@ -1197,6 +1204,10 @@ func expandAzureRmVirtualMachineDataDisk(d *schema.ResourceData) ([]compute.Data
|
||||||
CreateOption: compute.DiskCreateOptionTypes(createOption),
|
CreateOption: compute.DiskCreateOptionTypes(createOption),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if v := config["caching"].(string); v != "" {
|
||||||
|
data_disk.Caching = compute.CachingTypes(v)
|
||||||
|
}
|
||||||
|
|
||||||
if v := config["disk_size_gb"]; v != nil {
|
if v := config["disk_size_gb"]; v != nil {
|
||||||
diskSize := int32(config["disk_size_gb"].(int))
|
diskSize := int32(config["disk_size_gb"].(int))
|
||||||
data_disk.DiskSizeGB = &diskSize
|
data_disk.DiskSizeGB = &diskSize
|
||||||
|
|
|
@ -1019,6 +1019,7 @@ resource "azurerm_virtual_machine" "test" {
|
||||||
vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/mydatadisk1.vhd"
|
vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/mydatadisk1.vhd"
|
||||||
disk_size_gb = "1023"
|
disk_size_gb = "1023"
|
||||||
create_option = "Empty"
|
create_option = "Empty"
|
||||||
|
caching = "ReadWrite"
|
||||||
lun = 0
|
lun = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -450,6 +450,7 @@ resource "digitalocean_droplet" "foobar" {
|
||||||
size = "1gb"
|
size = "1gb"
|
||||||
image = "centos-7-x64"
|
image = "centos-7-x64"
|
||||||
region = "nyc3"
|
region = "nyc3"
|
||||||
|
user_data = "foobar"
|
||||||
ssh_keys = ["${digitalocean_ssh_key.foobar.id}"]
|
ssh_keys = ["${digitalocean_ssh_key.foobar.id}"]
|
||||||
resize_disk = false
|
resize_disk = false
|
||||||
}
|
}
|
||||||
|
|
|
@ -242,7 +242,11 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf
|
||||||
d.Set("instance_group", manager.InstanceGroup)
|
d.Set("instance_group", manager.InstanceGroup)
|
||||||
d.Set("target_size", manager.TargetSize)
|
d.Set("target_size", manager.TargetSize)
|
||||||
d.Set("self_link", manager.SelfLink)
|
d.Set("self_link", manager.SelfLink)
|
||||||
d.Set("update_strategy", "RESTART") //this field doesn't match the manager api, set to default value
|
update_strategy, ok := d.GetOk("update_strategy")
|
||||||
|
if !ok {
|
||||||
|
update_strategy = "RESTART"
|
||||||
|
}
|
||||||
|
d.Set("update_strategy", update_strategy.(string))
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -112,6 +112,29 @@ func TestAccInstanceGroupManager_updateLifecycle(t *testing.T) {
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccInstanceGroupManager_updateStrategy(t *testing.T) {
|
||||||
|
var manager compute.InstanceGroupManager
|
||||||
|
igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccInstanceGroupManager_updateStrategy(igm),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckInstanceGroupManagerExists(
|
||||||
|
"google_compute_instance_group_manager.igm-update-strategy", &manager),
|
||||||
|
testAccCheckInstanceGroupManagerUpdateStrategy(
|
||||||
|
"google_compute_instance_group_manager.igm-update-strategy", "NONE"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error {
|
func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error {
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
@ -268,6 +291,25 @@ func testAccCheckInstanceGroupManagerTemplateTags(n string, tags []string) resou
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckInstanceGroupManagerUpdateStrategy(n, strategy string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return fmt.Errorf("No ID is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.Attributes["update_strategy"] != strategy {
|
||||||
|
return fmt.Errorf("Expected strategy to be %s, got %s",
|
||||||
|
strategy, rs.Primary.Attributes["update_strategy"])
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func testAccInstanceGroupManager_basic(template, target, igm1, igm2 string) string {
|
func testAccInstanceGroupManager_basic(template, target, igm1, igm2 string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "google_compute_instance_template" "igm-basic" {
|
resource "google_compute_instance_template" "igm-basic" {
|
||||||
|
@ -488,6 +530,47 @@ func testAccInstanceGroupManager_updateLifecycle(tag, igm string) string {
|
||||||
}`, tag, igm)
|
}`, tag, igm)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccInstanceGroupManager_updateStrategy(igm string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_instance_template" "igm-update-strategy" {
|
||||||
|
machine_type = "n1-standard-1"
|
||||||
|
can_ip_forward = false
|
||||||
|
tags = ["terraform-testing"]
|
||||||
|
|
||||||
|
disk {
|
||||||
|
source_image = "debian-cloud/debian-8-jessie-v20160803"
|
||||||
|
auto_delete = true
|
||||||
|
boot = true
|
||||||
|
}
|
||||||
|
|
||||||
|
network_interface {
|
||||||
|
network = "default"
|
||||||
|
}
|
||||||
|
|
||||||
|
service_account {
|
||||||
|
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
|
||||||
|
}
|
||||||
|
|
||||||
|
lifecycle {
|
||||||
|
create_before_destroy = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_instance_group_manager" "igm-update-strategy" {
|
||||||
|
description = "Terraform test instance group manager"
|
||||||
|
name = "%s"
|
||||||
|
instance_template = "${google_compute_instance_template.igm-update-strategy.self_link}"
|
||||||
|
base_instance_name = "igm-update-strategy"
|
||||||
|
zone = "us-central1-c"
|
||||||
|
target_size = 2
|
||||||
|
update_strategy = "NONE"
|
||||||
|
named_port {
|
||||||
|
name = "customhttp"
|
||||||
|
port = 8080
|
||||||
|
}
|
||||||
|
}`, igm)
|
||||||
|
}
|
||||||
|
|
||||||
func resourceSplitter(resource string) string {
|
func resourceSplitter(resource string) string {
|
||||||
splits := strings.Split(resource, "/")
|
splits := strings.Split(resource, "/")
|
||||||
|
|
||||||
|
|
|
@ -203,6 +203,12 @@ func resourceComputeInstanceTemplate() *schema.Resource {
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"subnetwork_project": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
"access_config": &schema.Schema{
|
"access_config": &schema.Schema{
|
||||||
Type: schema.TypeList,
|
Type: schema.TypeList,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
|
@ -406,14 +412,16 @@ func buildNetworks(d *schema.ResourceData, meta interface{}) ([]*compute.Network
|
||||||
for i := 0; i < networksCount; i++ {
|
for i := 0; i < networksCount; i++ {
|
||||||
prefix := fmt.Sprintf("network_interface.%d", i)
|
prefix := fmt.Sprintf("network_interface.%d", i)
|
||||||
|
|
||||||
var networkName, subnetworkName string
|
var networkName, subnetworkName, subnetworkProject string
|
||||||
if v, ok := d.GetOk(prefix + ".network"); ok {
|
if v, ok := d.GetOk(prefix + ".network"); ok {
|
||||||
networkName = v.(string)
|
networkName = v.(string)
|
||||||
}
|
}
|
||||||
if v, ok := d.GetOk(prefix + ".subnetwork"); ok {
|
if v, ok := d.GetOk(prefix + ".subnetwork"); ok {
|
||||||
subnetworkName = v.(string)
|
subnetworkName = v.(string)
|
||||||
}
|
}
|
||||||
|
if v, ok := d.GetOk(prefix + ".subnetwork_project"); ok {
|
||||||
|
subnetworkProject = v.(string)
|
||||||
|
}
|
||||||
if networkName == "" && subnetworkName == "" {
|
if networkName == "" && subnetworkName == "" {
|
||||||
return nil, fmt.Errorf("network or subnetwork must be provided")
|
return nil, fmt.Errorf("network or subnetwork must be provided")
|
||||||
}
|
}
|
||||||
|
@ -435,8 +443,11 @@ func buildNetworks(d *schema.ResourceData, meta interface{}) ([]*compute.Network
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
if subnetworkProject == "" {
|
||||||
|
subnetworkProject = project
|
||||||
|
}
|
||||||
subnetwork, err := config.clientCompute.Subnetworks.Get(
|
subnetwork, err := config.clientCompute.Subnetworks.Get(
|
||||||
project, region, subnetworkName).Do()
|
subnetworkProject, region, subnetworkName).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(
|
return nil, fmt.Errorf(
|
||||||
"Error referencing subnetwork '%s' in region '%s': %s",
|
"Error referencing subnetwork '%s' in region '%s': %s",
|
||||||
|
@ -639,6 +650,7 @@ func flattenNetworkInterfaces(networkInterfaces []*compute.NetworkInterface) ([]
|
||||||
subnetworkUrl := strings.Split(networkInterface.Subnetwork, "/")
|
subnetworkUrl := strings.Split(networkInterface.Subnetwork, "/")
|
||||||
networkInterfaceMap["subnetwork"] = subnetworkUrl[len(subnetworkUrl)-1]
|
networkInterfaceMap["subnetwork"] = subnetworkUrl[len(subnetworkUrl)-1]
|
||||||
region = subnetworkUrl[len(subnetworkUrl)-3]
|
region = subnetworkUrl[len(subnetworkUrl)-3]
|
||||||
|
networkInterfaceMap["subnetwork_project"] = subnetworkUrl[len(subnetworkUrl)-5]
|
||||||
}
|
}
|
||||||
|
|
||||||
if networkInterface.AccessConfigs != nil {
|
if networkInterface.AccessConfigs != nil {
|
||||||
|
|
|
@ -2,6 +2,7 @@ package google
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
@ -115,6 +116,27 @@ func TestAccComputeInstanceTemplate_subnet_custom(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccComputeInstanceTemplate_subnet_xpn(t *testing.T) {
|
||||||
|
var instanceTemplate compute.InstanceTemplate
|
||||||
|
var xpn_host = os.Getenv("GOOGLE_XPN_HOST_PROJECT")
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeInstanceTemplate_subnet_xpn(xpn_host),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeInstanceTemplateExists(
|
||||||
|
"google_compute_instance_template.foobar", &instanceTemplate),
|
||||||
|
testAccCheckComputeInstanceTemplateSubnetwork(&instanceTemplate),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestAccComputeInstanceTemplate_metadata_startup_script(t *testing.T) {
|
func TestAccComputeInstanceTemplate_metadata_startup_script(t *testing.T) {
|
||||||
var instanceTemplate compute.InstanceTemplate
|
var instanceTemplate compute.InstanceTemplate
|
||||||
|
|
||||||
|
@ -467,6 +489,45 @@ resource "google_compute_instance_template" "foobar" {
|
||||||
}
|
}
|
||||||
}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
|
}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
|
||||||
|
|
||||||
|
func testAccComputeInstanceTemplate_subnet_xpn(xpn_host string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_network" "network" {
|
||||||
|
name = "network-%s"
|
||||||
|
auto_create_subnetworks = false
|
||||||
|
project = "%s"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_subnetwork" "subnetwork" {
|
||||||
|
name = "subnetwork-%s"
|
||||||
|
ip_cidr_range = "10.0.0.0/24"
|
||||||
|
region = "us-central1"
|
||||||
|
network = "${google_compute_network.network.self_link}"
|
||||||
|
project = "%s"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_instance_template" "foobar" {
|
||||||
|
name = "instance-test-%s"
|
||||||
|
machine_type = "n1-standard-1"
|
||||||
|
region = "us-central1"
|
||||||
|
|
||||||
|
disk {
|
||||||
|
source_image = "debian-8-jessie-v20160803"
|
||||||
|
auto_delete = true
|
||||||
|
disk_size_gb = 10
|
||||||
|
boot = true
|
||||||
|
}
|
||||||
|
|
||||||
|
network_interface {
|
||||||
|
subnetwork = "${google_compute_subnetwork.subnetwork.name}"
|
||||||
|
subnetwork_project = "${google_compute_subnetwork.subnetwork.project}"
|
||||||
|
}
|
||||||
|
|
||||||
|
metadata {
|
||||||
|
foo = "bar"
|
||||||
|
}
|
||||||
|
}`, acctest.RandString(10), xpn_host, acctest.RandString(10), xpn_host, acctest.RandString(10))
|
||||||
|
}
|
||||||
|
|
||||||
var testAccComputeInstanceTemplate_startup_script = fmt.Sprintf(`
|
var testAccComputeInstanceTemplate_startup_script = fmt.Sprintf(`
|
||||||
resource "google_compute_instance_template" "foobar" {
|
resource "google_compute_instance_template" "foobar" {
|
||||||
name = "instance-test-%s"
|
name = "instance-test-%s"
|
||||||
|
|
|
@ -10,6 +10,7 @@ import (
|
||||||
|
|
||||||
"github.com/gophercloud/gophercloud"
|
"github.com/gophercloud/gophercloud"
|
||||||
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers"
|
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers"
|
||||||
|
"github.com/gophercloud/gophercloud/openstack/networking/v2/ports"
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceLoadBalancerV2() *schema.Resource {
|
func resourceLoadBalancerV2() *schema.Resource {
|
||||||
|
@ -80,6 +81,13 @@ func resourceLoadBalancerV2() *schema.Resource {
|
||||||
Computed: true,
|
Computed: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"security_group_ids": &schema.Schema{
|
||||||
|
Type: schema.TypeSet,
|
||||||
|
Optional: true,
|
||||||
|
Elem: &schema.Schema{Type: schema.TypeString},
|
||||||
|
Set: schema.HashString,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -126,6 +134,13 @@ func resourceLoadBalancerV2Create(d *schema.ResourceData, meta interface{}) erro
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Once the loadbalancer has been created, apply any requested security groups
|
||||||
|
// to the port that was created behind the scenes.
|
||||||
|
if err := resourceLoadBalancerV2SecurityGroups(networkingClient, lb.VipPortID, d); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If all has been successful, set the ID on the resource
|
||||||
d.SetId(lb.ID)
|
d.SetId(lb.ID)
|
||||||
|
|
||||||
return resourceLoadBalancerV2Read(d, meta)
|
return resourceLoadBalancerV2Read(d, meta)
|
||||||
|
@ -155,6 +170,16 @@ func resourceLoadBalancerV2Read(d *schema.ResourceData, meta interface{}) error
|
||||||
d.Set("flavor", lb.Flavor)
|
d.Set("flavor", lb.Flavor)
|
||||||
d.Set("provider", lb.Provider)
|
d.Set("provider", lb.Provider)
|
||||||
|
|
||||||
|
// Get any security groups on the VIP Port
|
||||||
|
if lb.VipPortID != "" {
|
||||||
|
port, err := ports.Get(networkingClient, lb.VipPortID).Extract()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
d.Set("security_group_ids", port.SecurityGroups)
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -184,6 +209,14 @@ func resourceLoadBalancerV2Update(d *schema.ResourceData, meta interface{}) erro
|
||||||
return fmt.Errorf("Error updating OpenStack LBaaSV2 LoadBalancer: %s", err)
|
return fmt.Errorf("Error updating OpenStack LBaaSV2 LoadBalancer: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Security Groups get updated separately
|
||||||
|
if d.HasChange("security_group_ids") {
|
||||||
|
vipPortID := d.Get("vip_port_id").(string)
|
||||||
|
if err := resourceLoadBalancerV2SecurityGroups(networkingClient, vipPortID, d); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return resourceLoadBalancerV2Read(d, meta)
|
return resourceLoadBalancerV2Read(d, meta)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -212,6 +245,26 @@ func resourceLoadBalancerV2Delete(d *schema.ResourceData, meta interface{}) erro
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func resourceLoadBalancerV2SecurityGroups(networkingClient *gophercloud.ServiceClient, vipPortID string, d *schema.ResourceData) error {
|
||||||
|
if vipPortID != "" {
|
||||||
|
if _, ok := d.GetOk("security_group_ids"); ok {
|
||||||
|
updateOpts := ports.UpdateOpts{
|
||||||
|
SecurityGroups: resourcePortSecurityGroupsV2(d),
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[DEBUG] Adding security groups to OpenStack LoadBalancer "+
|
||||||
|
"VIP Port (%s): %#v", vipPortID, updateOpts)
|
||||||
|
|
||||||
|
_, err := ports.Update(networkingClient, vipPortID, updateOpts).Extract()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func waitForLoadBalancerActive(networkingClient *gophercloud.ServiceClient, lbID string) resource.StateRefreshFunc {
|
func waitForLoadBalancerActive(networkingClient *gophercloud.ServiceClient, lbID string) resource.StateRefreshFunc {
|
||||||
return func() (interface{}, string, error) {
|
return func() (interface{}, string, error) {
|
||||||
lb, err := loadbalancers.Get(networkingClient, lbID).Extract()
|
lb, err := loadbalancers.Get(networkingClient, lbID).Extract()
|
||||||
|
|
|
@ -5,9 +5,12 @@ import (
|
||||||
"regexp"
|
"regexp"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers"
|
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
|
||||||
|
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers"
|
||||||
|
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups"
|
||||||
|
"github.com/gophercloud/gophercloud/openstack/networking/v2/ports"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccLBV2LoadBalancer_basic(t *testing.T) {
|
func TestAccLBV2LoadBalancer_basic(t *testing.T) {
|
||||||
|
@ -19,13 +22,13 @@ func TestAccLBV2LoadBalancer_basic(t *testing.T) {
|
||||||
CheckDestroy: testAccCheckLBV2LoadBalancerDestroy,
|
CheckDestroy: testAccCheckLBV2LoadBalancerDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: TestAccLBV2LoadBalancerConfig_basic,
|
Config: testAccLBV2LoadBalancerConfig_basic,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckLBV2LoadBalancerExists("openstack_lb_loadbalancer_v2.loadbalancer_1", &lb),
|
testAccCheckLBV2LoadBalancerExists("openstack_lb_loadbalancer_v2.loadbalancer_1", &lb),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: TestAccLBV2LoadBalancerConfig_update,
|
Config: testAccLBV2LoadBalancerConfig_update,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
resource.TestCheckResourceAttr(
|
resource.TestCheckResourceAttr(
|
||||||
"openstack_lb_loadbalancer_v2.loadbalancer_1", "name", "loadbalancer_1_updated"),
|
"openstack_lb_loadbalancer_v2.loadbalancer_1", "name", "loadbalancer_1_updated"),
|
||||||
|
@ -38,6 +41,62 @@ func TestAccLBV2LoadBalancer_basic(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccLBV2LoadBalancer_secGroup(t *testing.T) {
|
||||||
|
var lb loadbalancers.LoadBalancer
|
||||||
|
var sg_1, sg_2 groups.SecGroup
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckLBV2LoadBalancerDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccLBV2LoadBalancer_secGroup,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckLBV2LoadBalancerExists(
|
||||||
|
"openstack_lb_loadbalancer_v2.loadbalancer_1", &lb),
|
||||||
|
testAccCheckNetworkingV2SecGroupExists(
|
||||||
|
"openstack_networking_secgroup_v2.secgroup_1", &sg_1),
|
||||||
|
testAccCheckNetworkingV2SecGroupExists(
|
||||||
|
"openstack_networking_secgroup_v2.secgroup_1", &sg_2),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"openstack_lb_loadbalancer_v2.loadbalancer_1", "security_group_ids.#", "1"),
|
||||||
|
testAccCheckLBV2LoadBalancerHasSecGroup(&lb, &sg_1),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccLBV2LoadBalancer_secGroup_update1,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckLBV2LoadBalancerExists(
|
||||||
|
"openstack_lb_loadbalancer_v2.loadbalancer_1", &lb),
|
||||||
|
testAccCheckNetworkingV2SecGroupExists(
|
||||||
|
"openstack_networking_secgroup_v2.secgroup_2", &sg_1),
|
||||||
|
testAccCheckNetworkingV2SecGroupExists(
|
||||||
|
"openstack_networking_secgroup_v2.secgroup_2", &sg_2),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"openstack_lb_loadbalancer_v2.loadbalancer_1", "security_group_ids.#", "2"),
|
||||||
|
testAccCheckLBV2LoadBalancerHasSecGroup(&lb, &sg_1),
|
||||||
|
testAccCheckLBV2LoadBalancerHasSecGroup(&lb, &sg_2),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccLBV2LoadBalancer_secGroup_update2,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckLBV2LoadBalancerExists(
|
||||||
|
"openstack_lb_loadbalancer_v2.loadbalancer_1", &lb),
|
||||||
|
testAccCheckNetworkingV2SecGroupExists(
|
||||||
|
"openstack_networking_secgroup_v2.secgroup_2", &sg_1),
|
||||||
|
testAccCheckNetworkingV2SecGroupExists(
|
||||||
|
"openstack_networking_secgroup_v2.secgroup_2", &sg_2),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"openstack_lb_loadbalancer_v2.loadbalancer_1", "security_group_ids.#", "1"),
|
||||||
|
testAccCheckLBV2LoadBalancerHasSecGroup(&lb, &sg_2),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckLBV2LoadBalancerDestroy(s *terraform.State) error {
|
func testAccCheckLBV2LoadBalancerDestroy(s *terraform.State) error {
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
networkingClient, err := config.networkingV2Client(OS_REGION_NAME)
|
networkingClient, err := config.networkingV2Client(OS_REGION_NAME)
|
||||||
|
@ -59,7 +118,8 @@ func testAccCheckLBV2LoadBalancerDestroy(s *terraform.State) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckLBV2LoadBalancerExists(n string, lb *loadbalancers.LoadBalancer) resource.TestCheckFunc {
|
func testAccCheckLBV2LoadBalancerExists(
|
||||||
|
n string, lb *loadbalancers.LoadBalancer) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
rs, ok := s.RootModule().Resources[n]
|
||||||
if !ok {
|
if !ok {
|
||||||
|
@ -91,7 +151,31 @@ func testAccCheckLBV2LoadBalancerExists(n string, lb *loadbalancers.LoadBalancer
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const TestAccLBV2LoadBalancerConfig_basic = `
|
func testAccCheckLBV2LoadBalancerHasSecGroup(
|
||||||
|
lb *loadbalancers.LoadBalancer, sg *groups.SecGroup) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
networkingClient, err := config.networkingV2Client(OS_REGION_NAME)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error creating OpenStack networking client: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
port, err := ports.Get(networkingClient, lb.VipPortID).Extract()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, p := range port.SecurityGroups {
|
||||||
|
if p == sg.ID {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("LoadBalancer does not have the security group")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const testAccLBV2LoadBalancerConfig_basic = `
|
||||||
resource "openstack_networking_network_v2" "network_1" {
|
resource "openstack_networking_network_v2" "network_1" {
|
||||||
name = "network_1"
|
name = "network_1"
|
||||||
admin_state_up = "true"
|
admin_state_up = "true"
|
||||||
|
@ -110,7 +194,7 @@ resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" {
|
||||||
}
|
}
|
||||||
`
|
`
|
||||||
|
|
||||||
const TestAccLBV2LoadBalancerConfig_update = `
|
const testAccLBV2LoadBalancerConfig_update = `
|
||||||
resource "openstack_networking_network_v2" "network_1" {
|
resource "openstack_networking_network_v2" "network_1" {
|
||||||
name = "network_1"
|
name = "network_1"
|
||||||
admin_state_up = "true"
|
admin_state_up = "true"
|
||||||
|
@ -129,3 +213,98 @@ resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" {
|
||||||
vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}"
|
vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}"
|
||||||
}
|
}
|
||||||
`
|
`
|
||||||
|
|
||||||
|
const testAccLBV2LoadBalancer_secGroup = `
|
||||||
|
resource "openstack_networking_secgroup_v2" "secgroup_1" {
|
||||||
|
name = "secgroup_1"
|
||||||
|
description = "secgroup_1"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_networking_secgroup_v2" "secgroup_2" {
|
||||||
|
name = "secgroup_2"
|
||||||
|
description = "secgroup_2"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_networking_network_v2" "network_1" {
|
||||||
|
name = "network_1"
|
||||||
|
admin_state_up = "true"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_networking_subnet_v2" "subnet_1" {
|
||||||
|
name = "subnet_1"
|
||||||
|
network_id = "${openstack_networking_network_v2.network_1.id}"
|
||||||
|
cidr = "192.168.199.0/24"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" {
|
||||||
|
name = "loadbalancer_1"
|
||||||
|
vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}"
|
||||||
|
security_group_ids = [
|
||||||
|
"${openstack_networking_secgroup_v2.secgroup_1.id}"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
const testAccLBV2LoadBalancer_secGroup_update1 = `
|
||||||
|
resource "openstack_networking_secgroup_v2" "secgroup_1" {
|
||||||
|
name = "secgroup_1"
|
||||||
|
description = "secgroup_1"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_networking_secgroup_v2" "secgroup_2" {
|
||||||
|
name = "secgroup_2"
|
||||||
|
description = "secgroup_2"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_networking_network_v2" "network_1" {
|
||||||
|
name = "network_1"
|
||||||
|
admin_state_up = "true"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_networking_subnet_v2" "subnet_1" {
|
||||||
|
name = "subnet_1"
|
||||||
|
network_id = "${openstack_networking_network_v2.network_1.id}"
|
||||||
|
cidr = "192.168.199.0/24"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" {
|
||||||
|
name = "loadbalancer_1"
|
||||||
|
vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}"
|
||||||
|
security_group_ids = [
|
||||||
|
"${openstack_networking_secgroup_v2.secgroup_1.id}",
|
||||||
|
"${openstack_networking_secgroup_v2.secgroup_2.id}"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
const testAccLBV2LoadBalancer_secGroup_update2 = `
|
||||||
|
resource "openstack_networking_secgroup_v2" "secgroup_1" {
|
||||||
|
name = "secgroup_1"
|
||||||
|
description = "secgroup_1"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_networking_secgroup_v2" "secgroup_2" {
|
||||||
|
name = "secgroup_2"
|
||||||
|
description = "secgroup_2"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_networking_network_v2" "network_1" {
|
||||||
|
name = "network_1"
|
||||||
|
admin_state_up = "true"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_networking_subnet_v2" "subnet_1" {
|
||||||
|
name = "subnet_1"
|
||||||
|
network_id = "${openstack_networking_network_v2.network_1.id}"
|
||||||
|
cidr = "192.168.199.0/24"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" {
|
||||||
|
name = "loadbalancer_1"
|
||||||
|
vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}"
|
||||||
|
security_group_ids = [
|
||||||
|
"${openstack_networking_secgroup_v2.secgroup_2.id}"
|
||||||
|
]
|
||||||
|
depends_on = ["openstack_networking_secgroup_v2.secgroup_1"]
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
|
@ -0,0 +1,46 @@
|
||||||
|
package opsgenie
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
|
||||||
|
"github.com/opsgenie/opsgenie-go-sdk/client"
|
||||||
|
)
|
||||||
|
|
||||||
|
type OpsGenieClient struct {
|
||||||
|
apiKey string
|
||||||
|
|
||||||
|
StopContext context.Context
|
||||||
|
|
||||||
|
teams client.OpsGenieTeamClient
|
||||||
|
users client.OpsGenieUserClient
|
||||||
|
}
|
||||||
|
|
||||||
|
// Config defines the configuration options for the OpsGenie client
|
||||||
|
type Config struct {
|
||||||
|
ApiKey string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Client returns a new OpsGenie client
|
||||||
|
func (c *Config) Client() (*OpsGenieClient, error) {
|
||||||
|
opsGenie := new(client.OpsGenieClient)
|
||||||
|
opsGenie.SetAPIKey(c.ApiKey)
|
||||||
|
client := OpsGenieClient{}
|
||||||
|
|
||||||
|
log.Printf("[INFO] OpsGenie client configured")
|
||||||
|
|
||||||
|
teamsClient, err := opsGenie.Team()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
client.teams = *teamsClient
|
||||||
|
|
||||||
|
usersClient, err := opsGenie.User()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
client.users = *usersClient
|
||||||
|
|
||||||
|
return &client, nil
|
||||||
|
}
|
|
@ -0,0 +1,66 @@
|
||||||
|
package opsgenie
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
"github.com/opsgenie/opsgenie-go-sdk/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
func dataSourceOpsGenieUser() *schema.Resource {
|
||||||
|
return &schema.Resource{
|
||||||
|
Read: dataSourceOpsGenieUserRead,
|
||||||
|
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"username": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
},
|
||||||
|
"full_name": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
"role": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func dataSourceOpsGenieUserRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
client := meta.(*OpsGenieClient).users
|
||||||
|
|
||||||
|
username := d.Get("username").(string)
|
||||||
|
|
||||||
|
log.Printf("[INFO] Reading OpsGenie user '%s'", username)
|
||||||
|
|
||||||
|
o := user.ListUsersRequest{}
|
||||||
|
resp, err := client.List(o)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var found *user.GetUserResponse
|
||||||
|
|
||||||
|
if len(resp.Users) > 0 {
|
||||||
|
for _, user := range resp.Users {
|
||||||
|
if user.Username == username {
|
||||||
|
found = &user
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if found == nil {
|
||||||
|
return fmt.Errorf("Unable to locate any user with the username: %s", username)
|
||||||
|
}
|
||||||
|
|
||||||
|
d.SetId(found.Id)
|
||||||
|
d.Set("username", found.Username)
|
||||||
|
d.Set("full_name", found.Fullname)
|
||||||
|
d.Set("role", found.Role)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -0,0 +1,65 @@
|
||||||
|
package opsgenie
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccDataSourceOpsGenieUser_Basic(t *testing.T) {
|
||||||
|
ri := acctest.RandInt()
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccDataSourceOpsGenieUserConfig(ri),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccDataSourceOpsGenieUser("opsgenie_user.test", "data.opsgenie_user.by_username"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccDataSourceOpsGenieUser(src, n string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
|
||||||
|
srcR := s.RootModule().Resources[src]
|
||||||
|
srcA := srcR.Primary.Attributes
|
||||||
|
|
||||||
|
r := s.RootModule().Resources[n]
|
||||||
|
a := r.Primary.Attributes
|
||||||
|
|
||||||
|
if a["id"] == "" {
|
||||||
|
return fmt.Errorf("Expected to get a user ID from OpsGenie")
|
||||||
|
}
|
||||||
|
|
||||||
|
testAtts := []string{"username", "full_name", "role"}
|
||||||
|
|
||||||
|
for _, att := range testAtts {
|
||||||
|
if a[att] != srcA[att] {
|
||||||
|
return fmt.Errorf("Expected the user %s to be: %s, but got: %s", att, srcA[att], a[att])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccDataSourceOpsGenieUserConfig(ri int) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "opsgenie_user" "test" {
|
||||||
|
username = "acctest-%d@example.tld"
|
||||||
|
full_name = "Acceptance Test User"
|
||||||
|
role = "User"
|
||||||
|
}
|
||||||
|
|
||||||
|
data "opsgenie_user" "by_username" {
|
||||||
|
username = "${opsgenie_user.test.username}"
|
||||||
|
}
|
||||||
|
`, ri)
|
||||||
|
}
|
|
@ -0,0 +1,82 @@
|
||||||
|
package opsgenie
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccOpsGenieTeam_importBasic(t *testing.T) {
|
||||||
|
resourceName := "opsgenie_team.test"
|
||||||
|
|
||||||
|
ri := acctest.RandInt()
|
||||||
|
config := fmt.Sprintf(testAccOpsGenieTeam_basic, ri)
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testCheckOpsGenieTeamDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: config,
|
||||||
|
},
|
||||||
|
|
||||||
|
{
|
||||||
|
ResourceName: resourceName,
|
||||||
|
ImportState: true,
|
||||||
|
ImportStateVerify: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccOpsGenieTeam_importWithUser(t *testing.T) {
|
||||||
|
resourceName := "opsgenie_team.test"
|
||||||
|
|
||||||
|
ri := acctest.RandInt()
|
||||||
|
config := fmt.Sprintf(testAccOpsGenieTeam_withUser, ri, ri)
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testCheckOpsGenieTeamDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: config,
|
||||||
|
},
|
||||||
|
|
||||||
|
{
|
||||||
|
ResourceName: resourceName,
|
||||||
|
ImportState: true,
|
||||||
|
ImportStateVerify: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccOpsGenieTeam_importWithUserComplete(t *testing.T) {
|
||||||
|
resourceName := "opsgenie_team.test"
|
||||||
|
|
||||||
|
ri := acctest.RandInt()
|
||||||
|
config := fmt.Sprintf(testAccOpsGenieTeam_withUserComplete, ri, ri)
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testCheckOpsGenieTeamDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: config,
|
||||||
|
},
|
||||||
|
|
||||||
|
{
|
||||||
|
ResourceName: resourceName,
|
||||||
|
ImportState: true,
|
||||||
|
ImportStateVerify: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
|
@ -0,0 +1,58 @@
|
||||||
|
package opsgenie
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccOpsGenieUser_importBasic(t *testing.T) {
|
||||||
|
resourceName := "opsgenie_user.test"
|
||||||
|
|
||||||
|
ri := acctest.RandInt()
|
||||||
|
config := fmt.Sprintf(testAccOpsGenieUser_basic, ri)
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testCheckOpsGenieUserDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: config,
|
||||||
|
},
|
||||||
|
|
||||||
|
{
|
||||||
|
ResourceName: resourceName,
|
||||||
|
ImportState: true,
|
||||||
|
ImportStateVerify: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccOpsGenieUser_importComplete(t *testing.T) {
|
||||||
|
resourceName := "opsgenie_user.test"
|
||||||
|
|
||||||
|
ri := acctest.RandInt()
|
||||||
|
config := fmt.Sprintf(testAccOpsGenieUser_complete, ri)
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testCheckOpsGenieUserDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: config,
|
||||||
|
},
|
||||||
|
|
||||||
|
{
|
||||||
|
ResourceName: resourceName,
|
||||||
|
ImportState: true,
|
||||||
|
ImportStateVerify: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
|
@ -0,0 +1,42 @@
|
||||||
|
package opsgenie
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Provider represents a resource provider in Terraform
|
||||||
|
// Provider represents a resource provider in Terraform.
// It declares a single required configuration attribute, api_key (also
// read from the OPSGENIE_API_KEY environment variable), the data sources
// and resources this plugin serves, and the configure hook that builds
// the OpsGenie API client.
func Provider() terraform.ResourceProvider {
	return &schema.Provider{
		Schema: map[string]*schema.Schema{
			// API key used to authenticate against the OpsGenie API.
			"api_key": {
				Type:        schema.TypeString,
				Required:    true,
				DefaultFunc: schema.EnvDefaultFunc("OPSGENIE_API_KEY", nil),
			},
		},

		DataSourcesMap: map[string]*schema.Resource{
			"opsgenie_user": dataSourceOpsGenieUser(),
		},

		ResourcesMap: map[string]*schema.Resource{
			"opsgenie_team": resourceOpsGenieTeam(),
			"opsgenie_user": resourceOpsGenieUser(),
		},

		ConfigureFunc: providerConfigure,
	}
}
|
||||||
|
|
||||||
|
func providerConfigure(data *schema.ResourceData) (interface{}, error) {
|
||||||
|
log.Println("[INFO] Initializing OpsGenie client")
|
||||||
|
|
||||||
|
config := Config{
|
||||||
|
ApiKey: data.Get("api_key").(string),
|
||||||
|
}
|
||||||
|
|
||||||
|
return config.Client()
|
||||||
|
}
|
|
@ -0,0 +1,37 @@
|
||||||
|
package opsgenie
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
)
|
||||||
|
|
||||||
|
var testAccProviders map[string]terraform.ResourceProvider
|
||||||
|
var testAccProvider *schema.Provider
|
||||||
|
|
||||||
|
// init wires the provider under test into the provider map used by the
// acceptance-test framework (resource.Test).
func init() {
	testAccProvider = Provider().(*schema.Provider)
	testAccProviders = map[string]terraform.ResourceProvider{
		"opsgenie": testAccProvider,
	}
}
|
||||||
|
|
||||||
|
func TestProvider(t *testing.T) {
|
||||||
|
if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestProvider_impl is a compile-time assertion that Provider's return
// value satisfies the terraform.ResourceProvider interface.
func TestProvider_impl(t *testing.T) {
	var _ terraform.ResourceProvider = Provider()
}
|
||||||
|
|
||||||
|
// testAccPreCheck aborts an acceptance test early when the OpsGenie API
// key needed to reach the real service is not configured.
func testAccPreCheck(t *testing.T) {
	if os.Getenv("OPSGENIE_API_KEY") == "" {
		t.Fatal("OPSGENIE_API_KEY must be set for acceptance tests")
	}
}
|
|
@ -0,0 +1,231 @@
|
||||||
|
package opsgenie
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
"github.com/opsgenie/opsgenie-go-sdk/team"
|
||||||
|
"regexp"
|
||||||
|
)
|
||||||
|
|
||||||
|
// resourceOpsGenieTeam defines the schema and CRUD entry points for the
// opsgenie_team resource. Import uses the default passthrough importer,
// so the import ID is the team's OpsGenie ID.
func resourceOpsGenieTeam() *schema.Resource {
	return &schema.Resource{
		Create: resourceOpsGenieTeamCreate,
		Read:   resourceOpsGenieTeamRead,
		Update: resourceOpsGenieTeamUpdate,
		Delete: resourceOpsGenieTeamDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},
		Schema: map[string]*schema.Schema{
			// Team name: alphanumerics/underscores only and under 100
			// characters (enforced by validateOpsGenieTeamName).
			"name": {
				Type:         schema.TypeString,
				Required:     true,
				ValidateFunc: validateOpsGenieTeamName,
			},
			// Zero or more team members; each entry names a user and an
			// optional role ("admin" or "user", defaulting to "user").
			"member": {
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"username": {
							Type:     schema.TypeString,
							Required: true,
						},

						"role": {
							Type:         schema.TypeString,
							Optional:     true,
							Default:      "user",
							ValidateFunc: validateOpsGenieTeamRole,
						},
					},
				},
			},
		},
	}
}
|
||||||
|
|
||||||
|
// resourceOpsGenieTeamCreate creates an OpsGenie team from the resource
// configuration, then looks the team up by name to learn its
// server-assigned ID (the create response evidently does not carry it —
// hence the follow-up Get) before delegating to Read to populate state.
func resourceOpsGenieTeamCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*OpsGenieClient).teams

	name := d.Get("name").(string)

	createRequest := team.CreateTeamRequest{
		Name:    name,
		Members: expandOpsGenieTeamMembers(d),
	}

	log.Printf("[INFO] Creating OpsGenie team '%s'", name)

	createResponse, err := client.Create(createRequest)
	if err != nil {
		return err
	}

	// The API can report failure via the response status even when the
	// HTTP call itself succeeded.
	err = checkOpsGenieResponse(createResponse.Code, createResponse.Status)
	if err != nil {
		return err
	}

	// Fetch by name to obtain the new team's ID.
	getRequest := team.GetTeamRequest{
		Name: name,
	}

	getResponse, err := client.Get(getRequest)
	if err != nil {
		return err
	}

	d.SetId(getResponse.Id)

	return resourceOpsGenieTeamRead(d, meta)
}
|
||||||
|
|
||||||
|
func resourceOpsGenieTeamRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
client := meta.(*OpsGenieClient).teams
|
||||||
|
|
||||||
|
listRequest := team.ListTeamsRequest{}
|
||||||
|
listResponse, err := client.List(listRequest)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var found *team.GetTeamResponse
|
||||||
|
for _, team := range listResponse.Teams {
|
||||||
|
if team.Id == d.Id() {
|
||||||
|
found = &team
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if found == nil {
|
||||||
|
d.SetId("")
|
||||||
|
log.Printf("[INFO] Team %q not found. Removing from state", d.Get("name").(string))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
getRequest := team.GetTeamRequest{
|
||||||
|
Id: d.Id(),
|
||||||
|
}
|
||||||
|
|
||||||
|
getResponse, err := client.Get(getRequest)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
d.Set("name", getResponse.Name)
|
||||||
|
d.Set("member", flattenOpsGenieTeamMembers(getResponse.Members))
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceOpsGenieTeamUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
client := meta.(*OpsGenieClient).teams
|
||||||
|
name := d.Get("name").(string)
|
||||||
|
|
||||||
|
updateRequest := team.UpdateTeamRequest{
|
||||||
|
Id: d.Id(),
|
||||||
|
Name: name,
|
||||||
|
Members: expandOpsGenieTeamMembers(d),
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[INFO] Updating OpsGenie team '%s'", name)
|
||||||
|
|
||||||
|
updateResponse, err := client.Update(updateRequest)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = checkOpsGenieResponse(updateResponse.Code, updateResponse.Status)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceOpsGenieTeamDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
log.Printf("[INFO] Deleting OpsGenie team '%s'", d.Get("name").(string))
|
||||||
|
client := meta.(*OpsGenieClient).teams
|
||||||
|
|
||||||
|
deleteRequest := team.DeleteTeamRequest{
|
||||||
|
Id: d.Id(),
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := client.Delete(deleteRequest)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func flattenOpsGenieTeamMembers(input []team.Member) []interface{} {
|
||||||
|
members := make([]interface{}, 0, len(input))
|
||||||
|
for _, inputMember := range input {
|
||||||
|
outputMember := make(map[string]interface{})
|
||||||
|
outputMember["username"] = inputMember.User
|
||||||
|
outputMember["role"] = inputMember.Role
|
||||||
|
|
||||||
|
members = append(members, outputMember)
|
||||||
|
}
|
||||||
|
|
||||||
|
return members
|
||||||
|
}
|
||||||
|
|
||||||
|
func expandOpsGenieTeamMembers(d *schema.ResourceData) []team.Member {
|
||||||
|
input := d.Get("member").([]interface{})
|
||||||
|
|
||||||
|
members := make([]team.Member, 0, len(input))
|
||||||
|
if input == nil {
|
||||||
|
return members
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range input {
|
||||||
|
config := v.(map[string]interface{})
|
||||||
|
|
||||||
|
username := config["username"].(string)
|
||||||
|
role := config["role"].(string)
|
||||||
|
|
||||||
|
member := team.Member{
|
||||||
|
User: username,
|
||||||
|
Role: role,
|
||||||
|
}
|
||||||
|
|
||||||
|
members = append(members, member)
|
||||||
|
}
|
||||||
|
|
||||||
|
return members
|
||||||
|
}
|
||||||
|
|
||||||
|
// opsGenieTeamNameRegexp matches valid OpsGenie team names: only
// alphanumeric characters and underscores. Compiled once at package
// scope rather than on every validation call.
var opsGenieTeamNameRegexp = regexp.MustCompile(`^[a-zA-Z0-9_]+$`)

// validateOpsGenieTeamName checks that v is a valid OpsGenie team name:
// alphanumerics/underscores only, and fewer than 100 characters.
func validateOpsGenieTeamName(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)
	if !opsGenieTeamNameRegexp.MatchString(value) {
		errors = append(errors, fmt.Errorf(
			"only alpha numeric characters and underscores are allowed in %q: %q", k, value))
	}

	if len(value) >= 100 {
		errors = append(errors, fmt.Errorf("%q cannot be longer than 100 characters: %q %d", k, value, len(value)))
	}

	return
}
|
||||||
|
|
||||||
|
// validateOpsGenieTeamRole ensures a team member role is one of the two
// roles OpsGenie supports ("admin" or "user"), case-insensitively.
func validateOpsGenieTeamRole(v interface{}, k string) (ws []string, errors []error) {
	switch strings.ToLower(v.(string)) {
	case "admin", "user":
		// valid role, nothing to report
	default:
		errors = append(errors, fmt.Errorf("OpsGenie Team Role can only be 'Admin' or 'User'"))
	}

	return
}
|
|
@ -0,0 +1,270 @@
|
||||||
|
package opsgenie
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
"github.com/opsgenie/opsgenie-go-sdk/team"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccOpsGenieTeamName_validation(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
Value string
|
||||||
|
ErrCount int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
Value: "hello-world",
|
||||||
|
ErrCount: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: "hello_world",
|
||||||
|
ErrCount: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: "helloWorld",
|
||||||
|
ErrCount: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: "helloworld12",
|
||||||
|
ErrCount: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: "hello@world",
|
||||||
|
ErrCount: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: "qfvbdsbvipqdbwsbddbdcwqffewsqwcdw21ddwqwd3324120",
|
||||||
|
ErrCount: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: "qfvbdsbvipqdbwsbddbdcwqffewsqwcdw21ddwqwd33241202qfvbdsbvipqdbwsbddbdcwqffewsqwcdw21ddwqwd33241202",
|
||||||
|
ErrCount: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: "qfvbdsbvipqdbwsbddbdcwqfjjfewsqwcdw21ddwqwd3324120qfvbdsbvipqdbwsbddbdcwqfjjfewsqwcdw21ddwqwd3324120",
|
||||||
|
ErrCount: 1,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range cases {
|
||||||
|
_, errors := validateOpsGenieTeamName(tc.Value, "opsgenie_team")
|
||||||
|
|
||||||
|
if len(errors) != tc.ErrCount {
|
||||||
|
t.Fatalf("Expected the OpsGenie Team Name to trigger a validation error: %v", errors)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccOpsGenieTeamRole_validation(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
Value string
|
||||||
|
ErrCount int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
Value: "admin",
|
||||||
|
ErrCount: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: "user",
|
||||||
|
ErrCount: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: "custom",
|
||||||
|
ErrCount: 1,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range cases {
|
||||||
|
_, errors := validateOpsGenieTeamRole(tc.Value, "opsgenie_team")
|
||||||
|
|
||||||
|
if len(errors) != tc.ErrCount {
|
||||||
|
t.Fatalf("Expected the OpsGenie Team Role to trigger a validation error")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccOpsGenieTeam_basic(t *testing.T) {
|
||||||
|
ri := acctest.RandInt()
|
||||||
|
config := fmt.Sprintf(testAccOpsGenieTeam_basic, ri)
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testCheckOpsGenieTeamDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: config,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testCheckOpsGenieTeamExists("opsgenie_team.test"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccOpsGenieTeam_withUser(t *testing.T) {
|
||||||
|
ri := acctest.RandInt()
|
||||||
|
config := fmt.Sprintf(testAccOpsGenieTeam_withUser, ri, ri)
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testCheckOpsGenieTeamDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: config,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testCheckOpsGenieTeamExists("opsgenie_team.test"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccOpsGenieTeam_withUserComplete(t *testing.T) {
|
||||||
|
ri := acctest.RandInt()
|
||||||
|
config := fmt.Sprintf(testAccOpsGenieTeam_withUserComplete, ri, ri)
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testCheckOpsGenieTeamDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: config,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testCheckOpsGenieTeamExists("opsgenie_team.test"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccOpsGenieTeam_withMultipleUsers(t *testing.T) {
|
||||||
|
ri := acctest.RandInt()
|
||||||
|
config := fmt.Sprintf(testAccOpsGenieTeam_withMultipleUsers, ri, ri, ri)
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testCheckOpsGenieTeamDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: config,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testCheckOpsGenieTeamExists("opsgenie_team.test"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// testCheckOpsGenieTeamDestroy verifies that no opsgenie_team resource
// from state survives a destroy: any team that can still be fetched via
// the API is reported as an error.
func testCheckOpsGenieTeamDestroy(s *terraform.State) error {
	client := testAccProvider.Meta().(*OpsGenieClient).teams

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "opsgenie_team" {
			continue
		}

		req := team.GetTeamRequest{
			Id: rs.Primary.Attributes["id"],
		}

		// The Get error is deliberately ignored: a failed lookup is the
		// expected outcome for a destroyed team. NOTE(review): this also
		// swallows transient API errors — confirm the SDK returns a nil
		// result in that case rather than a partial response.
		result, _ := client.Get(req)
		if result != nil {
			return fmt.Errorf("Team still exists:\n%#v", result)
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
func testCheckOpsGenieTeamExists(name string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
// Ensure we have enough information in state to look up in API
|
||||||
|
rs, ok := s.RootModule().Resources[name]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
id := rs.Primary.Attributes["id"]
|
||||||
|
name := rs.Primary.Attributes["name"]
|
||||||
|
|
||||||
|
client := testAccProvider.Meta().(*OpsGenieClient).teams
|
||||||
|
|
||||||
|
req := team.GetTeamRequest{
|
||||||
|
Id: rs.Primary.Attributes["id"],
|
||||||
|
}
|
||||||
|
|
||||||
|
result, _ := client.Get(req)
|
||||||
|
if result == nil {
|
||||||
|
return fmt.Errorf("Bad: Team %q (name: %q) does not exist", id, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Minimal team configuration. Sprintf args: one random int for the
// team name.
var testAccOpsGenieTeam_basic = `
resource "opsgenie_team" "test" {
name = "acctest%d"
}
`

// Team with a single member using the default role. Sprintf args: two
// random ints (user email, team name).
var testAccOpsGenieTeam_withUser = `
resource "opsgenie_user" "test" {
username = "acctest-%d@example.tld"
full_name = "Acceptance Test User"
role = "User"
}

resource "opsgenie_team" "test" {
name = "acctest%d"
member {
username = "${opsgenie_user.test.username}"
}
}
`

// Team with a single member whose role is set explicitly. Sprintf args:
// two random ints (user email, team name).
var testAccOpsGenieTeam_withUserComplete = `
resource "opsgenie_user" "test" {
username = "acctest-%d@example.tld"
full_name = "Acceptance Test User"
role = "User"
}

resource "opsgenie_team" "test" {
name = "acctest%d"
member {
username = "${opsgenie_user.test.username}"
role = "user"
}
}
`

// Team with two members. Sprintf args: three random ints (two user
// emails, team name).
var testAccOpsGenieTeam_withMultipleUsers = `
resource "opsgenie_user" "first" {
username = "acctest-1-%d@example.tld"
full_name = "First Acceptance Test User"
role = "User"
}
resource "opsgenie_user" "second" {
username = "acctest-2-%d@example.tld"
full_name = "Second Acceptance Test User"
role = "User"
}

resource "opsgenie_team" "test" {
name = "acctest%d"
member {
username = "${opsgenie_user.first.username}"
}
member {
username = "${opsgenie_user.second.username}"
}
}
`
|
|
@ -0,0 +1,211 @@
|
||||||
|
package opsgenie
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
"github.com/opsgenie/opsgenie-go-sdk/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// resourceOpsGenieUser defines the schema and CRUD entry points for the
// opsgenie_user resource. Import uses the default passthrough importer,
// so the import ID is the user's OpsGenie ID.
func resourceOpsGenieUser() *schema.Resource {
	return &schema.Resource{
		Create: resourceOpsGenieUserCreate,
		Read:   resourceOpsGenieUserRead,
		Update: resourceOpsGenieUserUpdate,
		Delete: resourceOpsGenieUserDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},
		Schema: map[string]*schema.Schema{
			// Changing the username replaces the user (ForceNew).
			"username": {
				Type:         schema.TypeString,
				ForceNew:     true,
				Required:     true,
				ValidateFunc: validateOpsGenieUserUsername,
			},
			"full_name": {
				Type:         schema.TypeString,
				Required:     true,
				ValidateFunc: validateOpsGenieUserFullName,
			},
			"role": {
				Type:         schema.TypeString,
				Required:     true,
				ValidateFunc: validateOpsGenieUserRole,
			},
			// Locale defaults to US English when not configured.
			"locale": {
				Type:     schema.TypeString,
				Optional: true,
				Default:  "en_US",
			},
			// Timezone defaults to US Eastern when not configured.
			"timezone": {
				Type:     schema.TypeString,
				Optional: true,
				Default:  "America/New_York",
			},
		},
	}
}
|
||||||
|
|
||||||
|
// resourceOpsGenieUserCreate creates an OpsGenie user from the resource
// configuration, then looks the user up by username to learn the
// server-assigned ID (the create response evidently does not carry it —
// hence the follow-up Get) before delegating to Read to populate state.
func resourceOpsGenieUserCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*OpsGenieClient).users

	username := d.Get("username").(string)
	fullName := d.Get("full_name").(string)
	role := d.Get("role").(string)
	locale := d.Get("locale").(string)
	timeZone := d.Get("timezone").(string)

	createRequest := user.CreateUserRequest{
		Username: username,
		Fullname: fullName,
		Role:     role,
		Locale:   locale,
		Timezone: timeZone,
	}

	log.Printf("[INFO] Creating OpsGenie user '%s'", username)
	createResponse, err := client.Create(createRequest)
	if err != nil {
		return err
	}

	// The API can report failure via the response status even when the
	// HTTP call itself succeeded.
	err = checkOpsGenieResponse(createResponse.Code, createResponse.Status)
	if err != nil {
		return err
	}

	// Fetch by username to obtain the new user's ID.
	getRequest := user.GetUserRequest{
		Username: username,
	}

	getResponse, err := client.Get(getRequest)
	if err != nil {
		return err
	}

	d.SetId(getResponse.Id)

	return resourceOpsGenieUserRead(d, meta)
}
|
||||||
|
|
||||||
|
func resourceOpsGenieUserRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
client := meta.(*OpsGenieClient).users
|
||||||
|
|
||||||
|
listRequest := user.ListUsersRequest{}
|
||||||
|
listResponse, err := client.List(listRequest)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var found *user.GetUserResponse
|
||||||
|
for _, user := range listResponse.Users {
|
||||||
|
if user.Id == d.Id() {
|
||||||
|
found = &user
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if found == nil {
|
||||||
|
d.SetId("")
|
||||||
|
log.Printf("[INFO] User %q not found. Removing from state", d.Get("username").(string))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
getRequest := user.GetUserRequest{
|
||||||
|
Id: d.Id(),
|
||||||
|
}
|
||||||
|
|
||||||
|
getResponse, err := client.Get(getRequest)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
d.Set("username", getResponse.Username)
|
||||||
|
d.Set("full_name", getResponse.Fullname)
|
||||||
|
d.Set("role", getResponse.Role)
|
||||||
|
d.Set("locale", getResponse.Locale)
|
||||||
|
d.Set("timezone", getResponse.Timezone)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceOpsGenieUserUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
client := meta.(*OpsGenieClient).users
|
||||||
|
|
||||||
|
username := d.Get("username").(string)
|
||||||
|
fullName := d.Get("full_name").(string)
|
||||||
|
role := d.Get("role").(string)
|
||||||
|
locale := d.Get("locale").(string)
|
||||||
|
timeZone := d.Get("timezone").(string)
|
||||||
|
|
||||||
|
log.Printf("[INFO] Updating OpsGenie user '%s'", username)
|
||||||
|
|
||||||
|
updateRequest := user.UpdateUserRequest{
|
||||||
|
Id: d.Id(),
|
||||||
|
Fullname: fullName,
|
||||||
|
Role: role,
|
||||||
|
Locale: locale,
|
||||||
|
Timezone: timeZone,
|
||||||
|
}
|
||||||
|
|
||||||
|
updateResponse, err := client.Update(updateRequest)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = checkOpsGenieResponse(updateResponse.Code, updateResponse.Status)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceOpsGenieUserDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
log.Printf("[INFO] Deleting OpsGenie user '%s'", d.Get("username").(string))
|
||||||
|
client := meta.(*OpsGenieClient).users
|
||||||
|
|
||||||
|
deleteRequest := user.DeleteUserRequest{
|
||||||
|
Id: d.Id(),
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := client.Delete(deleteRequest)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// validateOpsGenieUserUsername rejects usernames of 100 characters or
// more.
func validateOpsGenieUserUsername(v interface{}, k string) (ws []string, errors []error) {
	if value := v.(string); len(value) >= 100 {
		errors = append(errors, fmt.Errorf("%q cannot be longer than 100 characters: %q %d", k, value, len(value)))
	}

	return
}
|
||||||
|
|
||||||
|
// validateOpsGenieUserFullName rejects full names of 512 characters or
// more.
func validateOpsGenieUserFullName(v interface{}, k string) (ws []string, errors []error) {
	if value := v.(string); len(value) >= 512 {
		errors = append(errors, fmt.Errorf("%q cannot be longer than 512 characters: %q %d", k, value, len(value)))
	}

	return
}
|
||||||
|
|
||||||
|
// validateOpsGenieUserRole rejects role strings of 512 characters or
// more. Unlike the team role validator it does not restrict role
// values, only their length.
func validateOpsGenieUserRole(v interface{}, k string) (ws []string, errors []error) {
	if value := v.(string); len(value) >= 512 {
		errors = append(errors, fmt.Errorf("%q cannot be longer than 512 characters: %q %d", k, value, len(value)))
	}

	return
}
|
|
@ -0,0 +1,206 @@
|
||||||
|
package opsgenie
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
"github.com/opsgenie/opsgenie-go-sdk/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccOpsGenieUserUsername_validation(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
Value string
|
||||||
|
ErrCount int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
Value: "hello",
|
||||||
|
ErrCount: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: acctest.RandString(99),
|
||||||
|
ErrCount: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: acctest.RandString(100),
|
||||||
|
ErrCount: 1,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range cases {
|
||||||
|
_, errors := validateOpsGenieUserUsername(tc.Value, "opsgenie_team")
|
||||||
|
|
||||||
|
if len(errors) != tc.ErrCount {
|
||||||
|
t.Fatalf("Expected the OpsGenie User Username Validation to trigger a validation error: %v", errors)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccOpsGenieUserFullName_validation(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
Value string
|
||||||
|
ErrCount int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
Value: "hello",
|
||||||
|
ErrCount: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: acctest.RandString(100),
|
||||||
|
ErrCount: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: acctest.RandString(511),
|
||||||
|
ErrCount: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: acctest.RandString(512),
|
||||||
|
ErrCount: 1,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range cases {
|
||||||
|
_, errors := validateOpsGenieUserFullName(tc.Value, "opsgenie_team")
|
||||||
|
|
||||||
|
if len(errors) != tc.ErrCount {
|
||||||
|
t.Fatalf("Expected the OpsGenie User Full Name Validation to trigger a validation error: %v", errors)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccOpsGenieUserRole_validation(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
Value string
|
||||||
|
ErrCount int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
Value: "hello",
|
||||||
|
ErrCount: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: acctest.RandString(100),
|
||||||
|
ErrCount: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: acctest.RandString(511),
|
||||||
|
ErrCount: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Value: acctest.RandString(512),
|
||||||
|
ErrCount: 1,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range cases {
|
||||||
|
_, errors := validateOpsGenieUserRole(tc.Value, "opsgenie_team")
|
||||||
|
|
||||||
|
if len(errors) != tc.ErrCount {
|
||||||
|
t.Fatalf("Expected the OpsGenie User Role Validation to trigger a validation error: %v", errors)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccOpsGenieUser_basic(t *testing.T) {
|
||||||
|
ri := acctest.RandInt()
|
||||||
|
config := fmt.Sprintf(testAccOpsGenieUser_basic, ri)
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testCheckOpsGenieUserDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: config,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testCheckOpsGenieUserExists("opsgenie_user.test"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccOpsGenieUser_complete(t *testing.T) {
|
||||||
|
ri := acctest.RandInt()
|
||||||
|
config := fmt.Sprintf(testAccOpsGenieUser_complete, ri)
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testCheckOpsGenieUserDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: config,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testCheckOpsGenieUserExists("opsgenie_user.test"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// testCheckOpsGenieUserDestroy verifies that no opsgenie_user resource
// from state survives a destroy: any user that can still be fetched via
// the API is reported as an error.
func testCheckOpsGenieUserDestroy(s *terraform.State) error {
	client := testAccProvider.Meta().(*OpsGenieClient).users

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "opsgenie_user" {
			continue
		}

		req := user.GetUserRequest{
			Id: rs.Primary.Attributes["id"],
		}

		// The Get error is deliberately ignored: a failed lookup is the
		// expected outcome for a destroyed user. NOTE(review): this also
		// swallows transient API errors — confirm the SDK returns a nil
		// result in that case rather than a partial response.
		result, _ := client.Get(req)
		if result != nil {
			return fmt.Errorf("User still exists:\n%#v", result)
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
func testCheckOpsGenieUserExists(name string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
// Ensure we have enough information in state to look up in API
|
||||||
|
rs, ok := s.RootModule().Resources[name]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
id := rs.Primary.Attributes["id"]
|
||||||
|
username := rs.Primary.Attributes["username"]
|
||||||
|
|
||||||
|
client := testAccProvider.Meta().(*OpsGenieClient).users
|
||||||
|
|
||||||
|
req := user.GetUserRequest{
|
||||||
|
Id: rs.Primary.Attributes["id"],
|
||||||
|
}
|
||||||
|
|
||||||
|
result, _ := client.Get(req)
|
||||||
|
if result == nil {
|
||||||
|
return fmt.Errorf("Bad: User %q (username: %q) does not exist", id, username)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var testAccOpsGenieUser_basic = `
|
||||||
|
resource "opsgenie_user" "test" {
|
||||||
|
username = "acctest-%d@example.tld"
|
||||||
|
full_name = "Acceptance Test User"
|
||||||
|
role = "User"
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
var testAccOpsGenieUser_complete = `
|
||||||
|
resource "opsgenie_user" "test" {
|
||||||
|
username = "acctest-%d@example.tld"
|
||||||
|
full_name = "Acceptance Test User"
|
||||||
|
role = "User"
|
||||||
|
locale = "en_GB"
|
||||||
|
timezone = "Etc/GMT"
|
||||||
|
}
|
||||||
|
`
|
|
@ -0,0 +1,14 @@
|
||||||
|
package opsgenie
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
func checkOpsGenieResponse(code int, status string) error {
|
||||||
|
if code == http.StatusOK {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("Unexpected Status Code '%d', Response '%s'", code, status)
|
||||||
|
}
|
|
@ -86,7 +86,7 @@ func resourcePostgreSQLDatabase() *schema.Resource {
|
||||||
dbConnLimitAttr: {
|
dbConnLimitAttr: {
|
||||||
Type: schema.TypeInt,
|
Type: schema.TypeInt,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
Default: -1,
|
||||||
Description: "How many concurrent connections can be made to this database",
|
Description: "How many concurrent connections can be made to this database",
|
||||||
ValidateFunc: validateConnLimit,
|
ValidateFunc: validateConnLimit,
|
||||||
},
|
},
|
||||||
|
|
|
@ -77,7 +77,7 @@ func resourcePostgreSQLRole() *schema.Resource {
|
||||||
roleConnLimitAttr: {
|
roleConnLimitAttr: {
|
||||||
Type: schema.TypeInt,
|
Type: schema.TypeInt,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
Default: -1,
|
||||||
Description: "How many concurrent connections can be made with this role",
|
Description: "How many concurrent connections can be made with this role",
|
||||||
ValidateFunc: validateConnLimit,
|
ValidateFunc: validateConnLimit,
|
||||||
},
|
},
|
||||||
|
@ -484,7 +484,7 @@ func setRoleConnLimit(conn *sql.DB, d *schema.ResourceData) error {
|
||||||
|
|
||||||
connLimit := d.Get(roleConnLimitAttr).(int)
|
connLimit := d.Get(roleConnLimitAttr).(int)
|
||||||
roleName := d.Get(roleNameAttr).(string)
|
roleName := d.Get(roleNameAttr).(string)
|
||||||
query := fmt.Sprintf("ALTER ROLE %s CONNECTION LIMIT = %d", pq.QuoteIdentifier(roleName), connLimit)
|
query := fmt.Sprintf("ALTER ROLE %s CONNECTION LIMIT %d", pq.QuoteIdentifier(roleName), connLimit)
|
||||||
if _, err := conn.Query(query); err != nil {
|
if _, err := conn.Query(query); err != nil {
|
||||||
return errwrap.Wrapf("Error updating role CONNECTION LIMIT: {{err}}", err)
|
return errwrap.Wrapf("Error updating role CONNECTION LIMIT: {{err}}", err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -19,41 +19,47 @@ func TestAccPostgresqlRole_Basic(t *testing.T) {
|
||||||
Config: testAccPostgresqlRoleConfig,
|
Config: testAccPostgresqlRoleConfig,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckPostgresqlRoleExists("postgresql_role.myrole2", "true"),
|
testAccCheckPostgresqlRoleExists("postgresql_role.myrole2", "true"),
|
||||||
resource.TestCheckResourceAttr(
|
resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "name", "testing_role_with_defaults"),
|
||||||
"postgresql_role.myrole2", "name", "myrole2"),
|
resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "superuser", "false"),
|
||||||
resource.TestCheckResourceAttr(
|
resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "create_database", "false"),
|
||||||
"postgresql_role.myrole2", "login", "true"),
|
resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "create_role", "false"),
|
||||||
resource.TestCheckResourceAttr(
|
resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "inherit", "false"),
|
||||||
"postgresql_role.myrole2", "skip_drop_role", "false"),
|
resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "replication", "false"),
|
||||||
resource.TestCheckResourceAttr(
|
resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "bypass_row_level_security", "false"),
|
||||||
"postgresql_role.myrole2", "skip_reassign_owned", "false"),
|
resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "connection_limit", "-1"),
|
||||||
|
resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "encrypted_password", "true"),
|
||||||
|
resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "password", ""),
|
||||||
|
resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "valid_until", "infinity"),
|
||||||
|
resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "skip_drop_role", "false"),
|
||||||
|
resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "skip_reassign_owned", "false"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
resource.TestCheckResourceAttr(
|
func TestAccPostgresqlRole_Update(t *testing.T) {
|
||||||
"postgresql_role.role_with_defaults", "name", "testing_role_with_defaults"),
|
resource.Test(t, resource.TestCase{
|
||||||
resource.TestCheckResourceAttr(
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
"postgresql_role.role_with_defaults", "superuser", "false"),
|
Providers: testAccProviders,
|
||||||
resource.TestCheckResourceAttr(
|
CheckDestroy: testAccCheckPostgresqlRoleDestroy,
|
||||||
"postgresql_role.role_with_defaults", "create_database", "false"),
|
Steps: []resource.TestStep{
|
||||||
resource.TestCheckResourceAttr(
|
{
|
||||||
"postgresql_role.role_with_defaults", "create_role", "false"),
|
Config: testAccPostgresqlRoleUpdate1Config,
|
||||||
resource.TestCheckResourceAttr(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
"postgresql_role.role_with_defaults", "inherit", "false"),
|
testAccCheckPostgresqlRoleExists("postgresql_role.update_role", "true"),
|
||||||
resource.TestCheckResourceAttr(
|
resource.TestCheckResourceAttr("postgresql_role.update_role", "name", "update_role"),
|
||||||
"postgresql_role.role_with_defaults", "replication", "false"),
|
resource.TestCheckResourceAttr("postgresql_role.update_role", "login", "true"),
|
||||||
resource.TestCheckResourceAttr(
|
resource.TestCheckResourceAttr("postgresql_role.update_role", "connection_limit", "-1"),
|
||||||
"postgresql_role.role_with_defaults", "bypass_row_level_security", "false"),
|
),
|
||||||
resource.TestCheckResourceAttr(
|
},
|
||||||
"postgresql_role.role_with_defaults", "connection_limit", "-1"),
|
{
|
||||||
resource.TestCheckResourceAttr(
|
Config: testAccPostgresqlRoleUpdate2Config,
|
||||||
"postgresql_role.role_with_defaults", "encrypted_password", "true"),
|
Check: resource.ComposeTestCheckFunc(
|
||||||
resource.TestCheckResourceAttr(
|
testAccCheckPostgresqlRoleExists("postgresql_role.update_role", "true"),
|
||||||
"postgresql_role.role_with_defaults", "password", ""),
|
resource.TestCheckResourceAttr("postgresql_role.update_role", "name", "update_role2"),
|
||||||
resource.TestCheckResourceAttr(
|
resource.TestCheckResourceAttr("postgresql_role.update_role", "login", "true"),
|
||||||
"postgresql_role.role_with_defaults", "valid_until", "infinity"),
|
resource.TestCheckResourceAttr("postgresql_role.update_role", "connection_limit", "5"),
|
||||||
resource.TestCheckResourceAttr(
|
|
||||||
"postgresql_role.role_with_defaults", "skip_drop_role", "false"),
|
|
||||||
resource.TestCheckResourceAttr(
|
|
||||||
"postgresql_role.role_with_defaults", "skip_reassign_owned", "false"),
|
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -177,3 +183,18 @@ resource "postgresql_role" "role_with_defaults" {
|
||||||
valid_until = "infinity"
|
valid_until = "infinity"
|
||||||
}
|
}
|
||||||
`
|
`
|
||||||
|
|
||||||
|
var testAccPostgresqlRoleUpdate1Config = `
|
||||||
|
resource "postgresql_role" "update_role" {
|
||||||
|
name = "update_role"
|
||||||
|
login = true
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
var testAccPostgresqlRoleUpdate2Config = `
|
||||||
|
resource "postgresql_role" "update_role" {
|
||||||
|
name = "update_role2"
|
||||||
|
login = true
|
||||||
|
connection_limit = 5
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
|
@ -245,7 +245,7 @@ func resourceRancherStackUpdate(d *schema.ResourceData, meta interface{}) error
|
||||||
}
|
}
|
||||||
|
|
||||||
stateConf = &resource.StateChangeConf{
|
stateConf = &resource.StateChangeConf{
|
||||||
Pending: []string{"active", "upgraded"},
|
Pending: []string{"active", "upgraded", "finishing-upgrade"},
|
||||||
Target: []string{"active"},
|
Target: []string{"active"},
|
||||||
Refresh: StackStateRefreshFunc(client, stack.Id),
|
Refresh: StackStateRefreshFunc(client, stack.Id),
|
||||||
Timeout: 10 * time.Minute,
|
Timeout: 10 * time.Minute,
|
||||||
|
|
|
@ -48,21 +48,7 @@ func deleteRunningServer(scaleway *api.ScalewayAPI, server *api.ScalewayServer)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return resource.Retry(20*time.Minute, func() *resource.RetryError {
|
return waitForServerState(scaleway, server.Identifier, "stopped")
|
||||||
_, err := scaleway.GetServer(server.Identifier)
|
|
||||||
|
|
||||||
if err == nil {
|
|
||||||
return resource.RetryableError(fmt.Errorf("Waiting for server %q to be deleted", server.Identifier))
|
|
||||||
}
|
|
||||||
|
|
||||||
if serr, ok := err.(api.ScalewayAPIError); ok {
|
|
||||||
if serr.StatusCode == 404 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return resource.RetryableError(err)
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// deleteStoppedServer needs to cleanup attached root volumes. this is not done
|
// deleteStoppedServer needs to cleanup attached root volumes. this is not done
|
||||||
|
@ -83,20 +69,37 @@ func deleteStoppedServer(scaleway *api.ScalewayAPI, server *api.ScalewayServer)
|
||||||
// NOTE copied from github.com/scaleway/scaleway-cli/pkg/api/helpers.go
|
// NOTE copied from github.com/scaleway/scaleway-cli/pkg/api/helpers.go
|
||||||
// the helpers.go file pulls in quite a lot dependencies, and they're just convenience wrappers anyway
|
// the helpers.go file pulls in quite a lot dependencies, and they're just convenience wrappers anyway
|
||||||
|
|
||||||
func waitForServerState(scaleway *api.ScalewayAPI, serverID, targetState string) error {
|
var allStates = []string{"starting", "running", "stopping", "stopped"}
|
||||||
return resource.Retry(60*time.Minute, func() *resource.RetryError {
|
|
||||||
scaleway.ClearCache()
|
|
||||||
|
|
||||||
|
func waitForServerState(scaleway *api.ScalewayAPI, serverID, targetState string) error {
|
||||||
|
pending := []string{}
|
||||||
|
for _, state := range allStates {
|
||||||
|
if state != targetState {
|
||||||
|
pending = append(pending, state)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stateConf := &resource.StateChangeConf{
|
||||||
|
Pending: pending,
|
||||||
|
Target: []string{targetState},
|
||||||
|
Refresh: func() (interface{}, string, error) {
|
||||||
s, err := scaleway.GetServer(serverID)
|
s, err := scaleway.GetServer(serverID)
|
||||||
|
|
||||||
if err != nil {
|
if err == nil {
|
||||||
return resource.NonRetryableError(err)
|
return 42, s.State, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if s.State != targetState {
|
if serr, ok := err.(api.ScalewayAPIError); ok {
|
||||||
return resource.RetryableError(fmt.Errorf("Waiting for server to enter %q state", targetState))
|
if serr.StatusCode == 404 {
|
||||||
|
return 42, "stopped", nil
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return 42, s.State, err
|
||||||
})
|
},
|
||||||
|
Timeout: 60 * time.Minute,
|
||||||
|
MinTimeout: 5 * time.Second,
|
||||||
|
Delay: 5 * time.Second,
|
||||||
|
}
|
||||||
|
_, err := stateConf.WaitForState()
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -57,6 +57,10 @@ func resourceStatusCakeTest() *schema.Resource {
|
||||||
Type: schema.TypeInt,
|
Type: schema.TypeInt,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
},
|
},
|
||||||
|
"confirmations": &schema.Schema{
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -72,6 +76,7 @@ func CreateTest(d *schema.ResourceData, meta interface{}) error {
|
||||||
Paused: d.Get("paused").(bool),
|
Paused: d.Get("paused").(bool),
|
||||||
Timeout: d.Get("timeout").(int),
|
Timeout: d.Get("timeout").(int),
|
||||||
ContactID: d.Get("contact_id").(int),
|
ContactID: d.Get("contact_id").(int),
|
||||||
|
Confirmation: d.Get("confirmations").(int),
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("[DEBUG] Creating new StatusCake Test: %s", d.Get("website_name").(string))
|
log.Printf("[DEBUG] Creating new StatusCake Test: %s", d.Get("website_name").(string))
|
||||||
|
@ -134,6 +139,7 @@ func ReadTest(d *schema.ResourceData, meta interface{}) error {
|
||||||
d.Set("paused", testResp.Paused)
|
d.Set("paused", testResp.Paused)
|
||||||
d.Set("timeout", testResp.Timeout)
|
d.Set("timeout", testResp.Timeout)
|
||||||
d.Set("contact_id", testResp.ContactID)
|
d.Set("contact_id", testResp.ContactID)
|
||||||
|
d.Set("confirmations", testResp.Confirmation)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -167,5 +173,8 @@ func getStatusCakeTestInput(d *schema.ResourceData) *statuscake.Test {
|
||||||
if v, ok := d.GetOk("contact_id"); ok {
|
if v, ok := d.GetOk("contact_id"); ok {
|
||||||
test.ContactID = v.(int)
|
test.ContactID = v.(int)
|
||||||
}
|
}
|
||||||
|
if v, ok := d.GetOk("confirmations"); ok {
|
||||||
|
test.Confirmation = v.(int)
|
||||||
|
}
|
||||||
return test
|
return test
|
||||||
}
|
}
|
||||||
|
|
|
@ -52,6 +52,7 @@ func TestAccStatusCake_withUpdate(t *testing.T) {
|
||||||
resource.TestCheckResourceAttr("statuscake_test.google", "check_rate", "500"),
|
resource.TestCheckResourceAttr("statuscake_test.google", "check_rate", "500"),
|
||||||
resource.TestCheckResourceAttr("statuscake_test.google", "paused", "true"),
|
resource.TestCheckResourceAttr("statuscake_test.google", "paused", "true"),
|
||||||
resource.TestCheckResourceAttr("statuscake_test.google", "contact_id", "0"),
|
resource.TestCheckResourceAttr("statuscake_test.google", "contact_id", "0"),
|
||||||
|
resource.TestCheckResourceAttr("statuscake_test.google", "confirmations", "0"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -116,6 +117,8 @@ func testAccTestCheckAttributes(rn string, test *statuscake.Test) resource.TestC
|
||||||
err = check(key, value, strconv.Itoa(test.Timeout))
|
err = check(key, value, strconv.Itoa(test.Timeout))
|
||||||
case "contact_id":
|
case "contact_id":
|
||||||
err = check(key, value, strconv.Itoa(test.ContactID))
|
err = check(key, value, strconv.Itoa(test.ContactID))
|
||||||
|
case "confirmations":
|
||||||
|
err = check(key, value, strconv.Itoa(test.Confirmation))
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -145,6 +148,7 @@ resource "statuscake_test" "google" {
|
||||||
test_type = "HTTP"
|
test_type = "HTTP"
|
||||||
check_rate = 300
|
check_rate = 300
|
||||||
contact_id = 12345
|
contact_id = 12345
|
||||||
|
confirmations = 1
|
||||||
}
|
}
|
||||||
`
|
`
|
||||||
|
|
||||||
|
|
|
@ -41,6 +41,7 @@ import (
|
||||||
nomadprovider "github.com/hashicorp/terraform/builtin/providers/nomad"
|
nomadprovider "github.com/hashicorp/terraform/builtin/providers/nomad"
|
||||||
nullprovider "github.com/hashicorp/terraform/builtin/providers/null"
|
nullprovider "github.com/hashicorp/terraform/builtin/providers/null"
|
||||||
openstackprovider "github.com/hashicorp/terraform/builtin/providers/openstack"
|
openstackprovider "github.com/hashicorp/terraform/builtin/providers/openstack"
|
||||||
|
opsgenieprovider "github.com/hashicorp/terraform/builtin/providers/opsgenie"
|
||||||
packetprovider "github.com/hashicorp/terraform/builtin/providers/packet"
|
packetprovider "github.com/hashicorp/terraform/builtin/providers/packet"
|
||||||
pagerdutyprovider "github.com/hashicorp/terraform/builtin/providers/pagerduty"
|
pagerdutyprovider "github.com/hashicorp/terraform/builtin/providers/pagerduty"
|
||||||
postgresqlprovider "github.com/hashicorp/terraform/builtin/providers/postgresql"
|
postgresqlprovider "github.com/hashicorp/terraform/builtin/providers/postgresql"
|
||||||
|
@ -106,6 +107,7 @@ var InternalProviders = map[string]plugin.ProviderFunc{
|
||||||
"nomad": nomadprovider.Provider,
|
"nomad": nomadprovider.Provider,
|
||||||
"null": nullprovider.Provider,
|
"null": nullprovider.Provider,
|
||||||
"openstack": openstackprovider.Provider,
|
"openstack": openstackprovider.Provider,
|
||||||
|
"opsgenie": opsgenieprovider.Provider,
|
||||||
"packet": packetprovider.Provider,
|
"packet": packetprovider.Provider,
|
||||||
"pagerduty": pagerdutyprovider.Provider,
|
"pagerduty": pagerdutyprovider.Provider,
|
||||||
"postgresql": postgresqlprovider.Provider,
|
"postgresql": postgresqlprovider.Provider,
|
||||||
|
|
|
@ -0,0 +1 @@
|
||||||
|
# Empty
|
|
@ -0,0 +1 @@
|
||||||
|
module "root" { source = "./child" }
|
|
@ -0,0 +1,3 @@
|
||||||
|
module "child" {
|
||||||
|
source = "./child"
|
||||||
|
}
|
|
@ -0,0 +1 @@
|
||||||
|
# Empty
|
|
@ -0,0 +1,3 @@
|
||||||
|
module "root" {
|
||||||
|
source = "./child"
|
||||||
|
}
|
|
@ -261,6 +261,14 @@ func (t *Tree) Validate() error {
|
||||||
// If something goes wrong, here is our error template
|
// If something goes wrong, here is our error template
|
||||||
newErr := &TreeError{Name: []string{t.Name()}}
|
newErr := &TreeError{Name: []string{t.Name()}}
|
||||||
|
|
||||||
|
// Terraform core does not handle root module children named "root".
|
||||||
|
// We plan to fix this in the future but this bug was brought up in
|
||||||
|
// the middle of a release and we don't want to introduce wide-sweeping
|
||||||
|
// changes at that time.
|
||||||
|
if len(t.path) == 1 && t.name == "root" {
|
||||||
|
return fmt.Errorf("root module cannot contain module named 'root'")
|
||||||
|
}
|
||||||
|
|
||||||
// Validate our configuration first.
|
// Validate our configuration first.
|
||||||
if err := t.config.Validate(); err != nil {
|
if err := t.config.Validate(); err != nil {
|
||||||
newErr.Err = err
|
newErr.Err = err
|
||||||
|
|
|
@ -288,6 +288,18 @@ func TestTreeValidate_table(t *testing.T) {
|
||||||
"validate-alias-bad",
|
"validate-alias-bad",
|
||||||
"alias must be defined",
|
"alias must be defined",
|
||||||
},
|
},
|
||||||
|
|
||||||
|
{
|
||||||
|
"root module named root",
|
||||||
|
"validate-module-root",
|
||||||
|
"cannot contain module",
|
||||||
|
},
|
||||||
|
|
||||||
|
{
|
||||||
|
"grandchild module named root",
|
||||||
|
"validate-module-root-grandchild",
|
||||||
|
"",
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, tc := range cases {
|
for i, tc := range cases {
|
||||||
|
|
|
@ -79,7 +79,7 @@ func (g *marshalGraph) Dot(opts *DotOpts) []byte {
|
||||||
return w.Bytes()
|
return w.Bytes()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (v *marshalVertex) dot(g *marshalGraph) []byte {
|
func (v *marshalVertex) dot(g *marshalGraph, opts *DotOpts) []byte {
|
||||||
var buf bytes.Buffer
|
var buf bytes.Buffer
|
||||||
graphName := g.Name
|
graphName := g.Name
|
||||||
if graphName == "" {
|
if graphName == "" {
|
||||||
|
@ -89,7 +89,7 @@ func (v *marshalVertex) dot(g *marshalGraph) []byte {
|
||||||
name := v.Name
|
name := v.Name
|
||||||
attrs := v.Attrs
|
attrs := v.Attrs
|
||||||
if v.graphNodeDotter != nil {
|
if v.graphNodeDotter != nil {
|
||||||
node := v.graphNodeDotter.DotNode(name, nil)
|
node := v.graphNodeDotter.DotNode(name, opts)
|
||||||
if node == nil {
|
if node == nil {
|
||||||
return []byte{}
|
return []byte{}
|
||||||
}
|
}
|
||||||
|
@ -171,7 +171,7 @@ func (g *marshalGraph) writeBody(opts *DotOpts, w *indentWriter) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
w.Write(v.dot(g))
|
w.Write(v.dot(g, opts))
|
||||||
}
|
}
|
||||||
|
|
||||||
var dotEdges []string
|
var dotEdges []string
|
||||||
|
|
|
@ -0,0 +1,39 @@
|
||||||
|
package dag
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestGraphDot_opts(t *testing.T) {
|
||||||
|
var v testDotVertex
|
||||||
|
var g Graph
|
||||||
|
g.Add(&v)
|
||||||
|
|
||||||
|
opts := &DotOpts{MaxDepth: 42}
|
||||||
|
actual := g.Dot(opts)
|
||||||
|
if len(actual) == 0 {
|
||||||
|
t.Fatal("should not be empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !v.DotNodeCalled {
|
||||||
|
t.Fatal("should call DotNode")
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(v.DotNodeOpts, opts) {
|
||||||
|
t.Fatalf("bad; %#v", v.DotNodeOpts)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type testDotVertex struct {
|
||||||
|
DotNodeCalled bool
|
||||||
|
DotNodeTitle string
|
||||||
|
DotNodeOpts *DotOpts
|
||||||
|
DotNodeReturn *DotNode
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v *testDotVertex) DotNode(title string, opts *DotOpts) *DotNode {
|
||||||
|
v.DotNodeCalled = true
|
||||||
|
v.DotNodeTitle = title
|
||||||
|
v.DotNodeOpts = opts
|
||||||
|
return v.DotNodeReturn
|
||||||
|
}
|
|
@ -2,6 +2,7 @@ package flatmap
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
@ -42,9 +43,43 @@ func expandArray(m map[string]string, prefix string) []interface{} {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The Schema "Set" type stores its values in an array format, but using
|
||||||
|
// numeric hash values instead of ordinal keys. Take the set of keys
|
||||||
|
// regardless of value, and expand them in numeric order.
|
||||||
|
// See GH-11042 for more details.
|
||||||
|
keySet := map[int]bool{}
|
||||||
|
for k := range m {
|
||||||
|
if !strings.HasPrefix(k, prefix+".") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
key := k[len(prefix)+1:]
|
||||||
|
idx := strings.Index(key, ".")
|
||||||
|
if idx != -1 {
|
||||||
|
key = key[:idx]
|
||||||
|
}
|
||||||
|
|
||||||
|
// skip the count value
|
||||||
|
if key == "#" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
k, err := strconv.Atoi(key)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
keySet[int(k)] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
keysList := make([]int, 0, num)
|
||||||
|
for key := range keySet {
|
||||||
|
keysList = append(keysList, key)
|
||||||
|
}
|
||||||
|
sort.Ints(keysList)
|
||||||
|
|
||||||
result := make([]interface{}, num)
|
result := make([]interface{}, num)
|
||||||
for i := 0; i < int(num); i++ {
|
for i, key := range keysList {
|
||||||
result[i] = Expand(m, fmt.Sprintf("%s.%d", prefix, i))
|
result[i] = Expand(m, fmt.Sprintf("%s.%d", prefix, key))
|
||||||
}
|
}
|
||||||
|
|
||||||
return result
|
return result
|
||||||
|
|
|
@ -106,6 +106,17 @@ func TestExpand(t *testing.T) {
|
||||||
"list2": []interface{}{"c"},
|
"list2": []interface{}{"c"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|
||||||
|
{
|
||||||
|
Map: map[string]string{
|
||||||
|
"set.#": "3",
|
||||||
|
"set.1234": "a",
|
||||||
|
"set.1235": "b",
|
||||||
|
"set.1236": "c",
|
||||||
|
},
|
||||||
|
Key: "set",
|
||||||
|
Output: []interface{}{"a", "b", "c"},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range cases {
|
for _, tc := range cases {
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue