Merge branch 'master' into 2087-consul-service-resource

commit fce89ec1ae
.gitignore
@@ -18,3 +18,4 @@ website/node_modules
 *.bak
 *~
 .*.swp
+.idea
CHANGELOG.md
@@ -3,31 +3,63 @@
 FEATURES:
 
 * **New provider: `rundeck`** [GH-2412]
+* **New provider: `packet`** [GH-2260], [GH-3472]
+* **New provider: `vsphere`**: Initial support for a VM resource [GH-3419]
 * **New resource: `cloudstack_loadbalancer_rule`** [GH-2934]
 * **New resource: `google_compute_project_metadata`** [GH-3065]
-* **New resources: `aws_ami`, `aws_ami_copy`, `aws_ami_from_instance`** [GH-2874]
+* **New resources: `aws_ami`, `aws_ami_copy`, `aws_ami_from_instance`** [GH-2784]
+* **New resources: `aws_cloudwatch_log_group`** [GH-2415]
 * **New resource: `google_storage_bucket_object`** [GH-3192]
 * **New resources: `google_compute_vpn_gateway`, `google_compute_vpn_tunnel`** [GH-3213]
+* **New resources: `google_storage_bucket_acl`, `google_storage_object_acl`** [GH-3272]
+* **New resource: `aws_iam_saml_provider`** [GH-3156]
+* **New resources: `aws_efs_file_system` and `aws_efs_mount_target`** [GH-2196]
+* **New resources: `aws_opsworks_*`** [GH-2162]
+* **New resource: `aws_elasticsearch_domain`** [GH-3443]
+* **New resource: `aws_directory_service_directory`** [GH-3228]
+* **New resource: `aws_autoscaling_lifecycle_hook`** [GH-3351]
+* **New resource: `aws_placement_group`** [GH-3457]
+* **New resource: `aws_glacier_vault`** [GH-3491]
+* **New lifecycle flag: `ignore_changes`** [GH-2525]
 
 IMPROVEMENTS:
 
 * core: Add a function to find the index of an element in a list. [GH-2704]
 * core: Print all outputs when `terraform output` is called with no arguments [GH-2920]
 * core: In plan output summary, count resource replacement as Add/Remove instead of Change [GH-3173]
+* core: Add interpolation functions for base64 encoding and decoding. [GH-3325]
+* core: Expose parallelism as a CLI option instead of hard-coding the default of 10 [GH-3365]
+* core: Add interpolation function `compact`, to remove empty elements from a list. [GH-3239], [GH-3479]
+* core: Allow filtering of log output by level, using e.g. ``TF_LOG=INFO`` [GH-3380]
 * provider/aws: Add `instance_initiated_shutdown_behavior` to AWS Instance [GH-2887]
 * provider/aws: Support IAM role names (previously just ARNs) in `aws_ecs_service.iam_role` [GH-3061]
 * provider/aws: Add update method to RDS Subnet groups, can modify subnets without recreating [GH-3053]
 * provider/aws: Paginate notifications returned for ASG Notifications [GH-3043]
+* provider/aws: Adds additional S3 Bucket Object inputs [GH-3265]
 * provider/aws: add `ses_smtp_password` to `aws_iam_access_key` [GH-3165]
 * provider/aws: read `iam_instance_profile` for `aws_instance` and save to state [GH-3167]
+* provider/aws: allow `instance` to be computed in `aws_eip` [GH-3036]
 * provider/aws: Add `versioning` option to `aws_s3_bucket` [GH-2942]
 * provider/aws: Add `configuration_endpoint` to `aws_elasticache_cluster` [GH-3250]
+* provider/aws: Add validation for `app_cookie_stickiness_policy.name` [GH-3277]
+* provider/aws: Add validation for `db_parameter_group.name` [GH-3279]
+* provider/aws: Set DynamoDB Table ARN after creation [GH-3500]
+* provider/aws: `aws_s3_bucket_object` allows interpolated content to be set with new `content` attribute. [GH-3200]
+* provider/aws: Allow tags for `aws_kinesis_stream` resource. [GH-3397]
+* provider/aws: Configurable capacity waiting duration for ASGs [GH-3191]
+* provider/aws: Allow non-persistent Spot Requests [GH-3311]
+* provider/aws: Support tags for AWS DB subnet group [GH-3138]
 * provider/cloudstack: Add `project` parameter to `cloudstack_vpc`, `cloudstack_network`, `cloudstack_ipaddress` and `cloudstack_disk` [GH-3035]
+* provider/openstack: add functionality to attach FloatingIP to Port [GH-1788]
+* provider/google: Can now do multi-region deployments without using multiple providers [GH-3258]
+* remote/s3: Allow canned ACLs to be set on state objects. [GH-3233]
+* remote/s3: Remote state is stored in S3 with `Content-Type: application/json` [GH-3385]
 
 BUG FIXES:
 
 * core: Fix problems referencing list attributes in interpolations [GH-2157]
 * core: don't error on computed value during input walk [GH-2988]
+* core: Ignore missing variables during destroy phase [GH-3393]
 * provider/google: Crashes with interface conversion in GCE Instance Template [GH-3027]
 * provider/google: Convert int to int64 when building the GKE cluster.NodeConfig struct [GH-2978]
 * provider/google: google_compute_instance_template.network_interface.network should be a URL [GH-3226]
@@ -40,11 +72,26 @@ BUG FIXES:
   by AWS [GH-3120]
 * provider/aws: Read instance source_dest_check and save to state [GH-3152]
 * provider/aws: Allow `weight = 0` in Route53 records [GH-3196]
+* provider/aws: Normalize aws_elasticache_cluster id to lowercase, allowing convergence. [GH-3235]
+* provider/aws: Fix ValidateAccountId for IAM Instance Profiles [GH-3313]
+* provider/aws: Update Security Group Rules to Version 2 [GH-3019]
+* provider/aws: Migrate KeyPair to version 1, fixing issue with using `file()` [GH-3470]
+* provider/aws: Fix force_delete on autoscaling groups [GH-3485]
+* provider/aws: Fix crash with VPC Peering connections [GH-3490]
+* provider/docker: Fix issue preventing private images from being referenced [GH-2619]
+* provider/digitalocean: Fix issue causing unnecessary diffs based on droplet slugsize case [GH-3284]
 * provider/openstack: add state 'downloading' to list of expected states in
   `blockstorage_volume_v1` creation [GH-2866]
 * provider/openstack: remove security groups (by name) before adding security
   groups (by id) [GH-2008]
 
+INTERNAL IMPROVEMENTS:
+
+* core: Makefile target "plugin-dev" for building just one plugin. [GH-3229]
+* helper/schema: Don't allow ``Update`` func if no attributes can actually be updated, per schema. [GH-3288]
+* helper/schema: Default hashing function for sets [GH-3018]
+* helper/multierror: Remove in favor of [github.com/hashicorp/go-multierror](http://github.com/hashicorp/go-multierror). [GH-3336]
+
 ## 0.6.3 (August 11, 2015)
 
 BUG FIXES:
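As a quick illustration of the new `compact` interpolation function listed above: per the changelog entry it removes empty elements from a list. The following is a minimal standalone Go sketch of that semantics, not the actual core implementation:

package main

import "fmt"

// compact mirrors the documented behavior of the new interpolation
// function: drop empty elements from a list of strings.
func compact(in []string) []string {
    out := make([]string, 0, len(in))
    for _, s := range in {
        if s != "" {
            out = append(out, s)
        }
    }
    return out
}

func main() {
    fmt.Println(compact([]string{"a", "", "b", ""})) // prints [a b]
}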
Makefile
@@ -15,6 +15,12 @@ dev: generate
 quickdev: generate
     @TF_QUICKDEV=1 TF_DEV=1 sh -c "'$(CURDIR)/scripts/build.sh'"
 
+# Shorthand for building and installing just one plugin for local testing.
+# Run as (for example): make plugin-dev PLUGIN=provider-aws
+plugin-dev: generate
+    go install github.com/hashicorp/terraform/builtin/bins/$(PLUGIN)
+    mv $(GOPATH)/bin/$(PLUGIN) $(GOPATH)/bin/terraform-$(PLUGIN)
+
 release: updatedeps
     gox -build-toolchain
     @$(MAKE) bin
@@ -0,0 +1,12 @@
+package main
+
+import (
+    "github.com/hashicorp/terraform/builtin/providers/packet"
+    "github.com/hashicorp/terraform/plugin"
+)
+
+func main() {
+    plugin.Serve(&plugin.ServeOpts{
+        ProviderFunc: packet.Provider,
+    })
+}
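The plugin binary above is a thin shim: `plugin.Serve` hands control to Terraform's plugin RPC machinery, and `ProviderFunc` points at the provider's constructor. As a hedged sketch of the shape that constructor is expected to have (a `helper/schema` provider returning `terraform.ResourceProvider`; the `auth_token` field is purely illustrative, not necessarily the real packet provider's option):

package packet // sketch of the package imported from builtin/providers/packet

import (
    "github.com/hashicorp/terraform/helper/schema"
    "github.com/hashicorp/terraform/terraform"
)

// Provider returns the schema.Provider that plugin.Serve exposes over RPC.
func Provider() terraform.ResourceProvider {
    return &schema.Provider{
        Schema: map[string]*schema.Schema{
            // Illustrative configuration key; the real provider may differ.
            "auth_token": &schema.Schema{
                Type:     schema.TypeString,
                Required: true,
            },
        },
        // Resource names map to their schema.Resource definitions.
        ResourcesMap: map[string]*schema.Resource{},
    }
}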
@@ -0,0 +1,12 @@
+package main
+
+import (
+    "github.com/hashicorp/terraform/builtin/providers/vsphere"
+    "github.com/hashicorp/terraform/plugin"
+)
+
+func main() {
+    plugin.Serve(&plugin.ServeOpts{
+        ProviderFunc: vsphere.Provider,
+    })
+}
@@ -0,0 +1 @@
+package main
@@ -19,7 +19,6 @@ func resourceArtifact() *schema.Resource {
     return &schema.Resource{
         Create: resourceArtifactRead,
         Read:   resourceArtifactRead,
-        Update: resourceArtifactRead,
         Delete: resourceArtifactDelete,
 
         Schema: map[string]*schema.Schema{
@@ -5,21 +5,27 @@ import (
     "log"
     "strings"
 
-    "github.com/hashicorp/terraform/helper/multierror"
+    "github.com/hashicorp/go-multierror"
 
     "github.com/aws/aws-sdk-go/aws"
     "github.com/aws/aws-sdk-go/aws/awserr"
     "github.com/aws/aws-sdk-go/aws/credentials"
     "github.com/aws/aws-sdk-go/service/autoscaling"
     "github.com/aws/aws-sdk-go/service/cloudwatch"
+    "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
+    "github.com/aws/aws-sdk-go/service/directoryservice"
     "github.com/aws/aws-sdk-go/service/dynamodb"
     "github.com/aws/aws-sdk-go/service/ec2"
     "github.com/aws/aws-sdk-go/service/ecs"
+    "github.com/aws/aws-sdk-go/service/efs"
     "github.com/aws/aws-sdk-go/service/elasticache"
+    elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice"
     "github.com/aws/aws-sdk-go/service/elb"
+    "github.com/aws/aws-sdk-go/service/glacier"
     "github.com/aws/aws-sdk-go/service/iam"
     "github.com/aws/aws-sdk-go/service/kinesis"
     "github.com/aws/aws-sdk-go/service/lambda"
+    "github.com/aws/aws-sdk-go/service/opsworks"
     "github.com/aws/aws-sdk-go/service/rds"
     "github.com/aws/aws-sdk-go/service/route53"
     "github.com/aws/aws-sdk-go/service/s3"
@@ -42,10 +48,14 @@ type Config struct {
 
 type AWSClient struct {
     cloudwatchconn     *cloudwatch.CloudWatch
+    cloudwatchlogsconn *cloudwatchlogs.CloudWatchLogs
+    dsconn             *directoryservice.DirectoryService
     dynamodbconn       *dynamodb.DynamoDB
     ec2conn            *ec2.EC2
     ecsconn            *ecs.ECS
+    efsconn            *efs.EFS
     elbconn            *elb.ELB
+    esconn             *elasticsearch.ElasticsearchService
     autoscalingconn    *autoscaling.AutoScaling
     s3conn             *s3.S3
     sqsconn            *sqs.SQS
@@ -57,6 +67,8 @@ type AWSClient struct {
     kinesisconn     *kinesis.Kinesis
     elasticacheconn *elasticache.ElastiCache
     lambdaconn      *lambda.Lambda
+    opsworksconn    *opsworks.OpsWorks
+    glacierconn     *glacier.Glacier
 }
 
 // Client configures and returns a fully initialized AWSClient
@@ -102,6 +114,16 @@ func (c *Config) Client() (interface{}, error) {
             MaxRetries: aws.Int(c.MaxRetries),
             Endpoint:   aws.String(c.DynamoDBEndpoint),
         }
+        // Some services exist only in us-east-1, e.g. because they manage
+        // resources that can span across multiple regions, or because
+        // signature format v4 requires region to be us-east-1 for global
+        // endpoints:
+        // http://docs.aws.amazon.com/general/latest/gr/sigv4_changes.html
+        usEast1AwsConfig := &aws.Config{
+            Credentials: creds,
+            Region:      aws.String("us-east-1"),
+            MaxRetries:  aws.Int(c.MaxRetries),
+        }
 
         log.Println("[INFO] Initializing DynamoDB connection")
         client.dynamodbconn = dynamodb.New(awsDynamoDBConfig)
@@ -138,15 +160,14 @@ func (c *Config) Client() (interface{}, error) {
         log.Println("[INFO] Initializing ECS Connection")
         client.ecsconn = ecs.New(awsConfig)
 
-        // aws-sdk-go uses v4 for signing requests, which requires all global
-        // endpoints to use 'us-east-1'.
-        // See http://docs.aws.amazon.com/general/latest/gr/sigv4_changes.html
+        log.Println("[INFO] Initializing EFS Connection")
+        client.efsconn = efs.New(awsConfig)
+
+        log.Println("[INFO] Initializing ElasticSearch Connection")
+        client.esconn = elasticsearch.New(awsConfig)
+
         log.Println("[INFO] Initializing Route 53 connection")
-        client.r53conn = route53.New(&aws.Config{
-            Credentials: creds,
-            Region:      aws.String("us-east-1"),
-            MaxRetries:  aws.Int(c.MaxRetries),
-        })
+        client.r53conn = route53.New(usEast1AwsConfig)
 
         log.Println("[INFO] Initializing Elasticache Connection")
         client.elasticacheconn = elasticache.New(awsConfig)
@@ -156,6 +177,18 @@ func (c *Config) Client() (interface{}, error) {
 
         log.Println("[INFO] Initializing CloudWatch SDK connection")
         client.cloudwatchconn = cloudwatch.New(awsConfig)
+
+        log.Println("[INFO] Initializing CloudWatch Logs connection")
+        client.cloudwatchlogsconn = cloudwatchlogs.New(awsConfig)
+
+        log.Println("[INFO] Initializing OpsWorks Connection")
+        client.opsworksconn = opsworks.New(usEast1AwsConfig)
+
+        log.Println("[INFO] Initializing Directory Service connection")
+        client.dsconn = directoryservice.New(awsConfig)
+
+        log.Println("[INFO] Initializing Glacier connection")
+        client.glacierconn = glacier.New(awsConfig)
     }
 
     if len(errs) > 0 {
@@ -221,6 +254,7 @@ func (c *Config) ValidateAccountId(iamconn *iam.IAM) error {
         // User may be an IAM instance profile, so fail silently.
         // If it is an IAM instance profile
         // validating account might be superfluous
+        return nil
     } else {
         return fmt.Errorf("Failed getting account ID from IAM: %s", err)
         // return error if the account id is explicitly not authorised
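The net effect of the config hunks above: the provider now builds two SDK configs, the user's regional one (`awsConfig`) and a shared `usEast1AwsConfig`, and picks one per service. A dependency-free Go sketch of that selection pattern (the `cfg` type and service lists stand in for the SDK's, for illustration only):

package main

import "fmt"

// cfg stands in for aws.Config in this sketch.
type cfg struct{ region string }

func main() {
    regional := cfg{region: "eu-west-1"} // whatever the user configured
    usEast1 := cfg{region: "us-east-1"}  // pinned for SigV4 global endpoints

    // Regional services use the user's region...
    for _, svc := range []string{"ec2", "ecs", "efs", "elasticsearch"} {
        fmt.Println(svc, "->", regional.region)
    }
    // ...while global-endpoint services share the us-east-1 config.
    for _, svc := range []string{"route53", "opsworks"} {
        fmt.Println(svc, "->", usEast1.region)
    }
}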
@@ -0,0 +1,33 @@
+package aws
+
+import (
+    "github.com/awslabs/aws-sdk-go/aws"
+    "github.com/hashicorp/terraform/helper/schema"
+)
+
+func makeAwsStringList(in []interface{}) []*string {
+    ret := make([]*string, len(in), len(in))
+    for i := 0; i < len(in); i++ {
+        ret[i] = aws.String(in[i].(string))
+    }
+    return ret
+}
+
+func makeAwsStringSet(in *schema.Set) []*string {
+    inList := in.List()
+    ret := make([]*string, len(inList), len(inList))
+    for i := 0; i < len(ret); i++ {
+        ret[i] = aws.String(inList[i].(string))
+    }
+    return ret
+}
+
+func unwrapAwsStringList(in []*string) []string {
+    ret := make([]string, len(in), len(in))
+    for i := 0; i < len(in); i++ {
+        if in[i] != nil {
+            ret[i] = *in[i]
+        }
+    }
+    return ret
+}
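These helpers round-trip values between Terraform's schema types (lists of `interface{}`, `*schema.Set`) and the pointer-heavy string slices the AWS SDK expects. A standalone sketch of that round trip, with the helpers inlined so it runs without the SDK (`awsString` plays the role of `aws.String`, which just returns a pointer to its argument):

package main

import "fmt"

func awsString(s string) *string { return &s }

// Same behavior as makeAwsStringList above: schema list -> []*string.
func makeAwsStringList(in []interface{}) []*string {
    ret := make([]*string, len(in))
    for i := range in {
        ret[i] = awsString(in[i].(string))
    }
    return ret
}

// Same behavior as unwrapAwsStringList above: []*string -> []string,
// with nil entries becoming empty strings.
func unwrapAwsStringList(in []*string) []string {
    ret := make([]string, len(in))
    for i := range in {
        if in[i] != nil {
            ret[i] = *in[i]
        }
    }
    return ret
}

func main() {
    raw := []interface{}{"sg-123", "sg-456"} // how schema.TypeList data arrives
    ptrs := makeAwsStringList(raw)           // -> []*string for the AWS SDK
    fmt.Println(unwrapAwsStringList(ptrs))   // [sg-123 sg-456]
}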
@@ -29,7 +29,7 @@ func expandNetworkAclEntries(configured []interface{}, entryType string) ([]*ec2
             From: aws.Int64(int64(data["from_port"].(int))),
             To:   aws.Int64(int64(data["to_port"].(int))),
         },
-        Egress:     aws.Bool((entryType == "egress")),
+        Egress:     aws.Bool(entryType == "egress"),
         RuleAction: aws.String(data["action"].(string)),
         RuleNumber: aws.Int64(int64(data["rule_no"].(int))),
         CidrBlock:  aws.String(data["cidr_block"].(string)),
@@ -0,0 +1,558 @@
+package aws
+
+import (
+    "fmt"
+    "log"
+    "strconv"
+
+    "github.com/hashicorp/terraform/helper/hashcode"
+    "github.com/hashicorp/terraform/helper/schema"
+
+    "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/aws/awserr"
+    "github.com/aws/aws-sdk-go/service/opsworks"
+)
+
+// OpsWorks has a single concept of "layer" which represents several different
+// layer types. The differences between these are in some extra properties that
+// get packed into an "Attributes" map, but in the OpsWorks UI these are presented
+// as first-class options, and so Terraform prefers to expose them this way and
+// hide the implementation detail that they are all packed into a single type
+// in the underlying API.
+//
+// This file contains utilities that are shared between all of the concrete
+// layer resource types, which have names matching aws_opsworks_*_layer .
+
+type opsworksLayerTypeAttribute struct {
+    AttrName  string
+    Type      schema.ValueType
+    Default   interface{}
+    Required  bool
+    WriteOnly bool
+}
+
+type opsworksLayerType struct {
+    TypeName         string
+    DefaultLayerName string
+    Attributes       map[string]*opsworksLayerTypeAttribute
+    CustomShortName  bool
+}
+
+var (
+    opsworksTrueString  = "1"
+    opsworksFalseString = "0"
+)
+
+func (lt *opsworksLayerType) SchemaResource() *schema.Resource {
+    resourceSchema := map[string]*schema.Schema{
+        "id": &schema.Schema{
+            Type:     schema.TypeString,
+            Computed: true,
+        },
+
+        "auto_assign_elastic_ips": &schema.Schema{
+            Type:     schema.TypeBool,
+            Optional: true,
+            Default:  false,
+        },
+
+        "auto_assign_public_ips": &schema.Schema{
+            Type:     schema.TypeBool,
+            Optional: true,
+            Default:  false,
+        },
+
+        "custom_instance_profile_arn": &schema.Schema{
+            Type:     schema.TypeString,
+            Optional: true,
+        },
+
+        "custom_setup_recipes": &schema.Schema{
+            Type:     schema.TypeList,
+            Optional: true,
+            Elem:     &schema.Schema{Type: schema.TypeString},
+        },
+
+        "custom_configure_recipes": &schema.Schema{
+            Type:     schema.TypeList,
+            Optional: true,
+            Elem:     &schema.Schema{Type: schema.TypeString},
+        },
+
+        "custom_deploy_recipes": &schema.Schema{
+            Type:     schema.TypeList,
+            Optional: true,
+            Elem:     &schema.Schema{Type: schema.TypeString},
+        },
+
+        "custom_undeploy_recipes": &schema.Schema{
+            Type:     schema.TypeList,
+            Optional: true,
+            Elem:     &schema.Schema{Type: schema.TypeString},
+        },
+
+        "custom_shutdown_recipes": &schema.Schema{
+            Type:     schema.TypeList,
+            Optional: true,
+            Elem:     &schema.Schema{Type: schema.TypeString},
+        },
+
+        "custom_security_group_ids": &schema.Schema{
+            Type:     schema.TypeSet,
+            Optional: true,
+            Elem:     &schema.Schema{Type: schema.TypeString},
+            Set:      schema.HashString,
+        },
+
+        "auto_healing": &schema.Schema{
+            Type:     schema.TypeBool,
+            Optional: true,
+            Default:  true,
+        },
+
+        "install_updates_on_boot": &schema.Schema{
+            Type:     schema.TypeBool,
+            Optional: true,
+            Default:  true,
+        },
+
+        "instance_shutdown_timeout": &schema.Schema{
+            Type:     schema.TypeInt,
+            Optional: true,
+            Default:  120,
+        },
+
+        "drain_elb_on_shutdown": &schema.Schema{
+            Type:     schema.TypeBool,
+            Optional: true,
+            Default:  true,
+        },
+
+        "system_packages": &schema.Schema{
+            Type:     schema.TypeSet,
+            Optional: true,
+            Elem:     &schema.Schema{Type: schema.TypeString},
+            Set:      schema.HashString,
+        },
+
+        "stack_id": &schema.Schema{
+            Type:     schema.TypeString,
+            ForceNew: true,
+            Required: true,
+        },
+
+        "use_ebs_optimized_instances": &schema.Schema{
+            Type:     schema.TypeBool,
+            Optional: true,
+            Default:  false,
+        },
+
+        "ebs_volume": &schema.Schema{
+            Type:     schema.TypeSet,
+            Optional: true,
+            Elem: &schema.Resource{
+                Schema: map[string]*schema.Schema{
+
+                    "iops": &schema.Schema{
+                        Type:     schema.TypeInt,
+                        Optional: true,
+                        Default:  0,
+                    },
+
+                    "mount_point": &schema.Schema{
+                        Type:     schema.TypeString,
+                        Required: true,
+                    },
+
+                    "number_of_disks": &schema.Schema{
+                        Type:     schema.TypeInt,
+                        Required: true,
+                    },
+
+                    "raid_level": &schema.Schema{
+                        Type:     schema.TypeString,
+                        Optional: true,
+                        Default:  "",
+                    },
+
+                    "size": &schema.Schema{
+                        Type:     schema.TypeInt,
+                        Required: true,
+                    },
+
+                    "type": &schema.Schema{
+                        Type:     schema.TypeString,
+                        Optional: true,
+                        Default:  "standard",
+                    },
+                },
+            },
+            Set: func(v interface{}) int {
+                m := v.(map[string]interface{})
+                return hashcode.String(m["mount_point"].(string))
+            },
+        },
+    }
+
+    if lt.CustomShortName {
+        resourceSchema["short_name"] = &schema.Schema{
+            Type:     schema.TypeString,
+            Required: true,
+        }
+    }
+
+    if lt.DefaultLayerName != "" {
+        resourceSchema["name"] = &schema.Schema{
+            Type:     schema.TypeString,
+            Optional: true,
+            Default:  lt.DefaultLayerName,
+        }
+    } else {
+        resourceSchema["name"] = &schema.Schema{
+            Type:     schema.TypeString,
+            Required: true,
+        }
+    }
+
+    for key, def := range lt.Attributes {
+        resourceSchema[key] = &schema.Schema{
+            Type:     def.Type,
+            Default:  def.Default,
+            Required: def.Required,
+            Optional: !def.Required,
+        }
+    }
+
+    return &schema.Resource{
+        Read: func(d *schema.ResourceData, meta interface{}) error {
+            client := meta.(*AWSClient).opsworksconn
+            return lt.Read(d, client)
+        },
+        Create: func(d *schema.ResourceData, meta interface{}) error {
+            client := meta.(*AWSClient).opsworksconn
+            return lt.Create(d, client)
+        },
+        Update: func(d *schema.ResourceData, meta interface{}) error {
+            client := meta.(*AWSClient).opsworksconn
+            return lt.Update(d, client)
+        },
+        Delete: func(d *schema.ResourceData, meta interface{}) error {
+            client := meta.(*AWSClient).opsworksconn
+            return lt.Delete(d, client)
+        },
+
+        Schema: resourceSchema,
+    }
+}
+
+func (lt *opsworksLayerType) Read(d *schema.ResourceData, client *opsworks.OpsWorks) error {
+
+    req := &opsworks.DescribeLayersInput{
+        LayerIds: []*string{
+            aws.String(d.Id()),
+        },
+    }
+
+    log.Printf("[DEBUG] Reading OpsWorks layer: %s", d.Id())
+
+    resp, err := client.DescribeLayers(req)
+    if err != nil {
+        if awserr, ok := err.(awserr.Error); ok {
+            if awserr.Code() == "ResourceNotFoundException" {
+                d.SetId("")
+                return nil
+            }
+        }
+        return err
+    }
+
+    layer := resp.Layers[0]
+    d.Set("id", layer.LayerId)
+    d.Set("auto_assign_elastic_ips", layer.AutoAssignElasticIps)
+    d.Set("auto_assign_public_ips", layer.AutoAssignPublicIps)
+    d.Set("custom_instance_profile_arn", layer.CustomInstanceProfileArn)
+    d.Set("custom_security_group_ids", unwrapAwsStringList(layer.CustomSecurityGroupIds))
+    d.Set("auto_healing", layer.EnableAutoHealing)
+    d.Set("install_updates_on_boot", layer.InstallUpdatesOnBoot)
+    d.Set("name", layer.Name)
+    d.Set("system_packages", unwrapAwsStringList(layer.Packages))
+    d.Set("stack_id", layer.StackId)
+    d.Set("use_ebs_optimized_instances", layer.UseEbsOptimizedInstances)
+
+    if lt.CustomShortName {
+        d.Set("short_name", layer.Shortname)
+    }
+
+    lt.SetAttributeMap(d, layer.Attributes)
+    lt.SetLifecycleEventConfiguration(d, layer.LifecycleEventConfiguration)
+    lt.SetCustomRecipes(d, layer.CustomRecipes)
+    lt.SetVolumeConfigurations(d, layer.VolumeConfigurations)
+
+    return nil
+}
+
+func (lt *opsworksLayerType) Create(d *schema.ResourceData, client *opsworks.OpsWorks) error {
+
+    req := &opsworks.CreateLayerInput{
+        AutoAssignElasticIps:        aws.Bool(d.Get("auto_assign_elastic_ips").(bool)),
+        AutoAssignPublicIps:         aws.Bool(d.Get("auto_assign_public_ips").(bool)),
+        CustomInstanceProfileArn:    aws.String(d.Get("custom_instance_profile_arn").(string)),
+        CustomRecipes:               lt.CustomRecipes(d),
+        CustomSecurityGroupIds:      makeAwsStringSet(d.Get("custom_security_group_ids").(*schema.Set)),
+        EnableAutoHealing:           aws.Bool(d.Get("auto_healing").(bool)),
+        InstallUpdatesOnBoot:        aws.Bool(d.Get("install_updates_on_boot").(bool)),
+        LifecycleEventConfiguration: lt.LifecycleEventConfiguration(d),
+        Name:                        aws.String(d.Get("name").(string)),
+        Packages:                    makeAwsStringSet(d.Get("system_packages").(*schema.Set)),
+        Type:                        aws.String(lt.TypeName),
+        StackId:                     aws.String(d.Get("stack_id").(string)),
+        UseEbsOptimizedInstances:    aws.Bool(d.Get("use_ebs_optimized_instances").(bool)),
+        Attributes:                  lt.AttributeMap(d),
+        VolumeConfigurations:        lt.VolumeConfigurations(d),
+    }
+
+    if lt.CustomShortName {
+        req.Shortname = aws.String(d.Get("short_name").(string))
+    } else {
+        req.Shortname = aws.String(lt.TypeName)
+    }
+
+    log.Printf("[DEBUG] Creating OpsWorks layer: %s", d.Id())
+
+    resp, err := client.CreateLayer(req)
+    if err != nil {
+        return err
+    }
+
+    layerId := *resp.LayerId
+    d.SetId(layerId)
+    d.Set("id", layerId)
+
+    return lt.Read(d, client)
+}
+
+func (lt *opsworksLayerType) Update(d *schema.ResourceData, client *opsworks.OpsWorks) error {
+
+    req := &opsworks.UpdateLayerInput{
+        LayerId:                     aws.String(d.Id()),
+        AutoAssignElasticIps:        aws.Bool(d.Get("auto_assign_elastic_ips").(bool)),
+        AutoAssignPublicIps:         aws.Bool(d.Get("auto_assign_public_ips").(bool)),
+        CustomInstanceProfileArn:    aws.String(d.Get("custom_instance_profile_arn").(string)),
+        CustomRecipes:               lt.CustomRecipes(d),
+        CustomSecurityGroupIds:      makeAwsStringSet(d.Get("custom_security_group_ids").(*schema.Set)),
+        EnableAutoHealing:           aws.Bool(d.Get("auto_healing").(bool)),
+        InstallUpdatesOnBoot:        aws.Bool(d.Get("install_updates_on_boot").(bool)),
+        LifecycleEventConfiguration: lt.LifecycleEventConfiguration(d),
+        Name:                        aws.String(d.Get("name").(string)),
+        Packages:                    makeAwsStringSet(d.Get("system_packages").(*schema.Set)),
+        UseEbsOptimizedInstances:    aws.Bool(d.Get("use_ebs_optimized_instances").(bool)),
+        Attributes:                  lt.AttributeMap(d),
+        VolumeConfigurations:        lt.VolumeConfigurations(d),
+    }
+
+    if lt.CustomShortName {
+        req.Shortname = aws.String(d.Get("short_name").(string))
+    } else {
+        req.Shortname = aws.String(lt.TypeName)
+    }
+
+    log.Printf("[DEBUG] Updating OpsWorks layer: %s", d.Id())
+
+    _, err := client.UpdateLayer(req)
+    if err != nil {
+        return err
+    }
+
+    return lt.Read(d, client)
+}
+
+func (lt *opsworksLayerType) Delete(d *schema.ResourceData, client *opsworks.OpsWorks) error {
+    req := &opsworks.DeleteLayerInput{
+        LayerId: aws.String(d.Id()),
+    }
+
+    log.Printf("[DEBUG] Deleting OpsWorks layer: %s", d.Id())
+
+    _, err := client.DeleteLayer(req)
+    return err
+}
+
+func (lt *opsworksLayerType) AttributeMap(d *schema.ResourceData) map[string]*string {
+    attrs := map[string]*string{}
+
+    for key, def := range lt.Attributes {
+        value := d.Get(key)
+        switch def.Type {
+        case schema.TypeString:
+            strValue := value.(string)
+            attrs[def.AttrName] = &strValue
+        case schema.TypeInt:
+            intValue := value.(int)
+            strValue := strconv.Itoa(intValue)
+            attrs[def.AttrName] = &strValue
+        case schema.TypeBool:
+            boolValue := value.(bool)
+            if boolValue {
+                attrs[def.AttrName] = &opsworksTrueString
+            } else {
+                attrs[def.AttrName] = &opsworksFalseString
+            }
+        default:
+            // should never happen
+            panic(fmt.Errorf("Unsupported OpsWorks layer attribute type"))
+        }
+    }
+
+    return attrs
+}
+
+func (lt *opsworksLayerType) SetAttributeMap(d *schema.ResourceData, attrs map[string]*string) {
+    for key, def := range lt.Attributes {
+        // Ignore write-only attributes; we'll just keep what we already have stored.
+        // (The AWS API returns garbage placeholder values for these.)
+        if def.WriteOnly {
+            continue
+        }
+
+        if strPtr, ok := attrs[def.AttrName]; ok && strPtr != nil {
+            strValue := *strPtr
+
+            switch def.Type {
+            case schema.TypeString:
+                d.Set(key, strValue)
+            case schema.TypeInt:
+                intValue, err := strconv.Atoi(strValue)
+                if err == nil {
+                    d.Set(key, intValue)
+                } else {
+                    // Got garbage from the AWS API
+                    d.Set(key, nil)
+                }
+            case schema.TypeBool:
+                boolValue := true
+                if strValue == opsworksFalseString {
+                    boolValue = false
+                }
+                d.Set(key, boolValue)
+            default:
+                // should never happen
+                panic(fmt.Errorf("Unsupported OpsWorks layer attribute type"))
+            }
+            return
+
+        } else {
+            d.Set(key, nil)
+        }
+    }
+}
+
+func (lt *opsworksLayerType) LifecycleEventConfiguration(d *schema.ResourceData) *opsworks.LifecycleEventConfiguration {
+    return &opsworks.LifecycleEventConfiguration{
+        Shutdown: &opsworks.ShutdownEventConfiguration{
+            DelayUntilElbConnectionsDrained: aws.Bool(d.Get("drain_elb_on_shutdown").(bool)),
+            ExecutionTimeout:                aws.Int64(int64(d.Get("instance_shutdown_timeout").(int))),
+        },
+    }
+}
+
+func (lt *opsworksLayerType) SetLifecycleEventConfiguration(d *schema.ResourceData, v *opsworks.LifecycleEventConfiguration) {
+    if v == nil || v.Shutdown == nil {
+        d.Set("drain_elb_on_shutdown", nil)
+        d.Set("instance_shutdown_timeout", nil)
+    } else {
+        d.Set("drain_elb_on_shutdown", v.Shutdown.DelayUntilElbConnectionsDrained)
+        d.Set("instance_shutdown_timeout", v.Shutdown.ExecutionTimeout)
+    }
+}
+
+func (lt *opsworksLayerType) CustomRecipes(d *schema.ResourceData) *opsworks.Recipes {
+    return &opsworks.Recipes{
+        Configure: makeAwsStringList(d.Get("custom_configure_recipes").([]interface{})),
+        Deploy:    makeAwsStringList(d.Get("custom_deploy_recipes").([]interface{})),
+        Setup:     makeAwsStringList(d.Get("custom_setup_recipes").([]interface{})),
+        Shutdown:  makeAwsStringList(d.Get("custom_shutdown_recipes").([]interface{})),
+        Undeploy:  makeAwsStringList(d.Get("custom_undeploy_recipes").([]interface{})),
+    }
+}
+
+func (lt *opsworksLayerType) SetCustomRecipes(d *schema.ResourceData, v *opsworks.Recipes) {
+    // Null out everything first, and then we'll consider what to put back.
+    d.Set("custom_configure_recipes", nil)
+    d.Set("custom_deploy_recipes", nil)
+    d.Set("custom_setup_recipes", nil)
+    d.Set("custom_shutdown_recipes", nil)
+    d.Set("custom_undeploy_recipes", nil)
+
+    if v == nil {
+        return
+    }
+
+    d.Set("custom_configure_recipes", unwrapAwsStringList(v.Configure))
+    d.Set("custom_deploy_recipes", unwrapAwsStringList(v.Deploy))
+    d.Set("custom_setup_recipes", unwrapAwsStringList(v.Setup))
+    d.Set("custom_shutdown_recipes", unwrapAwsStringList(v.Shutdown))
+    d.Set("custom_undeploy_recipes", unwrapAwsStringList(v.Undeploy))
+}
+
+func (lt *opsworksLayerType) VolumeConfigurations(d *schema.ResourceData) []*opsworks.VolumeConfiguration {
+    configuredVolumes := d.Get("ebs_volume").(*schema.Set).List()
+    result := make([]*opsworks.VolumeConfiguration, len(configuredVolumes))
+
+    for i := 0; i < len(configuredVolumes); i++ {
+        volumeData := configuredVolumes[i].(map[string]interface{})
+
+        result[i] = &opsworks.VolumeConfiguration{
+            MountPoint:    aws.String(volumeData["mount_point"].(string)),
+            NumberOfDisks: aws.Int64(int64(volumeData["number_of_disks"].(int))),
+            Size:          aws.Int64(int64(volumeData["size"].(int))),
+            VolumeType:    aws.String(volumeData["type"].(string)),
+        }
+        iops := int64(volumeData["iops"].(int))
+        if iops != 0 {
+            result[i].Iops = aws.Int64(iops)
+        }
+
+        raidLevelStr := volumeData["raid_level"].(string)
+        if raidLevelStr != "" {
+            raidLevel, err := strconv.Atoi(raidLevelStr)
+            if err == nil {
+                result[i].RaidLevel = aws.Int64(int64(raidLevel))
+            }
+        }
+    }
+
+    return result
+}
+
+func (lt *opsworksLayerType) SetVolumeConfigurations(d *schema.ResourceData, v []*opsworks.VolumeConfiguration) {
+    newValue := make([]*map[string]interface{}, len(v))
+
+    for i := 0; i < len(v); i++ {
+        config := v[i]
+        data := make(map[string]interface{})
+        newValue[i] = &data
+
+        if config.Iops != nil {
+            data["iops"] = int(*config.Iops)
+        } else {
+            data["iops"] = 0
+        }
+        if config.MountPoint != nil {
+            data["mount_point"] = *config.MountPoint
+        }
+        if config.NumberOfDisks != nil {
+            data["number_of_disks"] = int(*config.NumberOfDisks)
+        }
+        if config.RaidLevel != nil {
+            data["raid_level"] = strconv.Itoa(int(*config.RaidLevel))
+        }
+        if config.Size != nil {
+            data["size"] = int(*config.Size)
+        }
+        if config.VolumeType != nil {
+            data["type"] = *config.VolumeType
+        }
+    }
+
+    d.Set("ebs_volume", newValue)
+}
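As the file's own comment notes, the concrete `aws_opsworks_*_layer` resources are thin wrappers around this shared machinery. A hedged sketch of what one such wrapper plausibly looks like (the real definitions registered in the provider below may set more `Attributes`; the function name here is suffixed to mark it as illustrative):

package aws

import "github.com/hashicorp/terraform/helper/schema"

// Sketch: a custom layer has a user-chosen short name and, in this
// simplified illustration, no extra type-specific attributes, so the
// wrapper only fills in the basics and delegates to SchemaResource.
func resourceAwsOpsworksCustomLayerSketch() *schema.Resource {
    layerType := &opsworksLayerType{
        TypeName:        "custom",
        CustomShortName: true,
        Attributes:      map[string]*opsworksLayerTypeAttribute{},
    }
    return layerType.SchemaResource()
}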
@@ -163,24 +163,31 @@ func Provider() terraform.ResourceProvider {
             "aws_autoscaling_group":            resourceAwsAutoscalingGroup(),
             "aws_autoscaling_notification":     resourceAwsAutoscalingNotification(),
             "aws_autoscaling_policy":           resourceAwsAutoscalingPolicy(),
+            "aws_cloudwatch_log_group":         resourceAwsCloudWatchLogGroup(),
+            "aws_autoscaling_lifecycle_hook":   resourceAwsAutoscalingLifecycleHook(),
             "aws_cloudwatch_metric_alarm":      resourceAwsCloudWatchMetricAlarm(),
             "aws_customer_gateway":             resourceAwsCustomerGateway(),
             "aws_db_instance":                  resourceAwsDbInstance(),
             "aws_db_parameter_group":           resourceAwsDbParameterGroup(),
             "aws_db_security_group":            resourceAwsDbSecurityGroup(),
             "aws_db_subnet_group":              resourceAwsDbSubnetGroup(),
+            "aws_directory_service_directory":  resourceAwsDirectoryServiceDirectory(),
             "aws_dynamodb_table":               resourceAwsDynamoDbTable(),
             "aws_ebs_volume":                   resourceAwsEbsVolume(),
             "aws_ecs_cluster":                  resourceAwsEcsCluster(),
             "aws_ecs_service":                  resourceAwsEcsService(),
             "aws_ecs_task_definition":          resourceAwsEcsTaskDefinition(),
+            "aws_efs_file_system":              resourceAwsEfsFileSystem(),
+            "aws_efs_mount_target":             resourceAwsEfsMountTarget(),
             "aws_eip":                          resourceAwsEip(),
             "aws_elasticache_cluster":          resourceAwsElasticacheCluster(),
             "aws_elasticache_parameter_group":  resourceAwsElasticacheParameterGroup(),
             "aws_elasticache_security_group":   resourceAwsElasticacheSecurityGroup(),
             "aws_elasticache_subnet_group":     resourceAwsElasticacheSubnetGroup(),
+            "aws_elasticsearch_domain":         resourceAwsElasticSearchDomain(),
             "aws_elb":                          resourceAwsElb(),
             "aws_flow_log":                     resourceAwsFlowLog(),
+            "aws_glacier_vault":                resourceAwsGlacierVault(),
             "aws_iam_access_key":               resourceAwsIamAccessKey(),
             "aws_iam_group_policy":             resourceAwsIamGroupPolicy(),
             "aws_iam_group":                    resourceAwsIamGroup(),
@@ -190,6 +197,7 @@ func Provider() terraform.ResourceProvider {
             "aws_iam_policy_attachment":        resourceAwsIamPolicyAttachment(),
             "aws_iam_role_policy":              resourceAwsIamRolePolicy(),
             "aws_iam_role":                     resourceAwsIamRole(),
+            "aws_iam_saml_provider":            resourceAwsIamSamlProvider(),
             "aws_iam_server_certificate":       resourceAwsIAMServerCertificate(),
             "aws_iam_user_policy":              resourceAwsIamUserPolicy(),
             "aws_iam_user":                     resourceAwsIamUser(),
@@ -203,7 +211,21 @@ func Provider() terraform.ResourceProvider {
             "aws_main_route_table_association": resourceAwsMainRouteTableAssociation(),
             "aws_network_acl":                  resourceAwsNetworkAcl(),
             "aws_network_interface":            resourceAwsNetworkInterface(),
+            "aws_opsworks_stack":               resourceAwsOpsworksStack(),
+            "aws_opsworks_java_app_layer":      resourceAwsOpsworksJavaAppLayer(),
+            "aws_opsworks_haproxy_layer":       resourceAwsOpsworksHaproxyLayer(),
+            "aws_opsworks_static_web_layer":    resourceAwsOpsworksStaticWebLayer(),
+            "aws_opsworks_php_app_layer":       resourceAwsOpsworksPhpAppLayer(),
+            "aws_opsworks_rails_app_layer":     resourceAwsOpsworksRailsAppLayer(),
+            "aws_opsworks_nodejs_app_layer":    resourceAwsOpsworksNodejsAppLayer(),
+            "aws_opsworks_memcached_layer":     resourceAwsOpsworksMemcachedLayer(),
+            "aws_opsworks_mysql_layer":         resourceAwsOpsworksMysqlLayer(),
+            "aws_opsworks_ganglia_layer":       resourceAwsOpsworksGangliaLayer(),
+            "aws_opsworks_custom_layer":        resourceAwsOpsworksCustomLayer(),
+            "aws_placement_group":              resourceAwsPlacementGroup(),
             "aws_proxy_protocol_policy":        resourceAwsProxyProtocolPolicy(),
+            "aws_rds_cluster":                  resourceAwsRDSCluster(),
+            "aws_rds_cluster_instance":         resourceAwsRDSClusterInstance(),
             "aws_route53_delegation_set":       resourceAwsRoute53DelegationSet(),
             "aws_route53_record":               resourceAwsRoute53Record(),
             "aws_route53_zone_association":     resourceAwsRoute53ZoneAssociation(),
@@ -130,7 +130,7 @@ func resourceAwsAmiRead(d *schema.ResourceData, meta interface{}) error {
     }
 
     image := res.Images[0]
-    state := *(image.State)
+    state := *image.State
 
     if state == "pending" {
         // This could happen if a user manually adds an image we didn't create
@@ -142,7 +142,7 @@ func resourceAwsAmiRead(d *schema.ResourceData, meta interface{}) error {
         if err != nil {
             return err
         }
-        state = *(image.State)
+        state = *image.State
     }
 
     if state == "deregistered" {
@@ -170,22 +170,22 @@ func resourceAwsAmiRead(d *schema.ResourceData, meta interface{}) error {
     for _, blockDev := range image.BlockDeviceMappings {
         if blockDev.Ebs != nil {
             ebsBlockDev := map[string]interface{}{
-                "device_name":           *(blockDev.DeviceName),
-                "delete_on_termination": *(blockDev.Ebs.DeleteOnTermination),
-                "encrypted":             *(blockDev.Ebs.Encrypted),
+                "device_name":           *blockDev.DeviceName,
+                "delete_on_termination": *blockDev.Ebs.DeleteOnTermination,
+                "encrypted":             *blockDev.Ebs.Encrypted,
                 "iops":                  0,
-                "snapshot_id":           *(blockDev.Ebs.SnapshotId),
-                "volume_size":           int(*(blockDev.Ebs.VolumeSize)),
-                "volume_type":           *(blockDev.Ebs.VolumeType),
+                "snapshot_id":           *blockDev.Ebs.SnapshotId,
+                "volume_size":           int(*blockDev.Ebs.VolumeSize),
+                "volume_type":           *blockDev.Ebs.VolumeType,
             }
             if blockDev.Ebs.Iops != nil {
-                ebsBlockDev["iops"] = int(*(blockDev.Ebs.Iops))
+                ebsBlockDev["iops"] = int(*blockDev.Ebs.Iops)
             }
             ebsBlockDevs = append(ebsBlockDevs, ebsBlockDev)
         } else {
             ephemeralBlockDevs = append(ephemeralBlockDevs, map[string]interface{}{
-                "device_name":  *(blockDev.DeviceName),
-                "virtual_name": *(blockDev.VirtualName),
+                "device_name":  *blockDev.DeviceName,
+                "virtual_name": *blockDev.VirtualName,
             })
         }
     }
@@ -301,7 +301,7 @@ func resourceAwsAmiWaitForAvailable(id string, client *ec2.EC2) (*ec2.Image, err
             return nil, fmt.Errorf("new AMI vanished while pending")
         }
 
-        state := *(res.Images[0].State)
+        state := *res.Images[0].State
 
         if state == "pending" {
             // Give it a few seconds before we poll again.
@@ -316,7 +316,7 @@ func resourceAwsAmiWaitForAvailable(id string, client *ec2.EC2) (*ec2.Image, err
 
         // If we're not pending or available then we're in one of the invalid/error
         // states, so stop polling and bail out.
-        stateReason := *(res.Images[0].StateReason)
+        stateReason := *res.Images[0].StateReason
         return nil, fmt.Errorf("new AMI became %s while pending: %s", state, stateReason)
     }
 }
@@ -2,6 +2,7 @@ package aws
 
 import (
     "fmt"
+    "regexp"
     "strings"
 
     "github.com/aws/aws-sdk-go/aws"
@@ -15,8 +16,6 @@ func resourceAwsAppCookieStickinessPolicy() *schema.Resource {
         // There is no concept of "updating" an App Stickiness policy in
         // the AWS API.
         Create: resourceAwsAppCookieStickinessPolicyCreate,
-        Update: resourceAwsAppCookieStickinessPolicyCreate,
-
         Read:   resourceAwsAppCookieStickinessPolicyRead,
         Delete: resourceAwsAppCookieStickinessPolicyDelete,
 
@@ -25,6 +24,14 @@ func resourceAwsAppCookieStickinessPolicy() *schema.Resource {
                 Type:     schema.TypeString,
                 Required: true,
                 ForceNew: true,
+                ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
+                    value := v.(string)
+                    if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) {
+                        es = append(es, fmt.Errorf(
+                            "only alphanumeric characters and hyphens allowed in %q", k))
+                    }
+                    return
+                },
             },
 
             "load_balancer": &schema.Schema{
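The new `ValidateFunc` rejects any policy name that is not purely alphanumerics and hyphens. A standalone demonstration of the same regexp the diff compiles:

package main

import (
    "fmt"
    "regexp"
)

func main() {
    // The exact pattern used in the ValidateFunc above.
    valid := regexp.MustCompile(`^[0-9A-Za-z-]+$`)
    for _, name := range []string{"my-policy-1", "bad_name", "spaces here"} {
        fmt.Printf("%q allowed: %v\n", name, valid.MatchString(name))
    }
}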
@ -73,8 +73,7 @@ func resourceAwsAutoscalingGroup() *schema.Resource {
|
||||||
"force_delete": &schema.Schema{
|
"force_delete": &schema.Schema{
|
||||||
Type: schema.TypeBool,
|
Type: schema.TypeBool,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
Default: false,
|
||||||
ForceNew: true,
|
|
||||||
},
|
},
|
||||||
|
|
||||||
"health_check_grace_period": &schema.Schema{
|
"health_check_grace_period": &schema.Schema{
|
||||||
|
@ -120,6 +119,25 @@ func resourceAwsAutoscalingGroup() *schema.Resource {
|
||||||
Set: schema.HashString,
|
Set: schema.HashString,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"wait_for_capacity_timeout": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Default: "10m",
|
||||||
|
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
||||||
|
value := v.(string)
|
||||||
|
duration, err := time.ParseDuration(value)
|
||||||
|
if err != nil {
|
||||||
|
errors = append(errors, fmt.Errorf(
|
||||||
|
"%q cannot be parsed as a duration: %s", k, err))
|
||||||
|
}
|
||||||
|
if duration < 0 {
|
||||||
|
errors = append(errors, fmt.Errorf(
|
||||||
|
"%q must be greater than zero", k))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
"tag": autoscalingTagsSchema(),
|
"tag": autoscalingTagsSchema(),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -334,15 +352,9 @@ func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{})
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("[DEBUG] AutoScaling Group destroy: %v", d.Id())
|
log.Printf("[DEBUG] AutoScaling Group destroy: %v", d.Id())
|
||||||
deleteopts := autoscaling.DeleteAutoScalingGroupInput{AutoScalingGroupName: aws.String(d.Id())}
|
deleteopts := autoscaling.DeleteAutoScalingGroupInput{
|
||||||
|
AutoScalingGroupName: aws.String(d.Id()),
|
||||||
// You can force an autoscaling group to delete
|
ForceDelete: aws.Bool(d.Get("force_delete").(bool)),
|
||||||
// even if it's in the process of scaling a resource.
|
|
||||||
// Normally, you would set the min-size and max-size to 0,0
|
|
||||||
// and then delete the group. This bypasses that and leaves
|
|
||||||
// resources potentially dangling.
|
|
||||||
if d.Get("force_delete").(bool) {
|
|
||||||
deleteopts.ForceDelete = aws.Bool(true)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// We retry the delete operation to handle InUse/InProgress errors coming
|
// We retry the delete operation to handle InUse/InProgress errors coming
|
||||||
|
@ -414,6 +426,11 @@ func getAwsAutoscalingGroup(
|
||||||
func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) error {
|
func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) error {
|
||||||
conn := meta.(*AWSClient).autoscalingconn
|
conn := meta.(*AWSClient).autoscalingconn
|
||||||
|
|
||||||
|
if d.Get("force_delete").(bool) {
|
||||||
|
log.Printf("[DEBUG] Skipping ASG drain, force_delete was set.")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// First, set the capacity to zero so the group will drain
|
// First, set the capacity to zero so the group will drain
|
||||||
log.Printf("[DEBUG] Reducing autoscaling group capacity to zero")
|
log.Printf("[DEBUG] Reducing autoscaling group capacity to zero")
|
||||||
opts := autoscaling.UpdateAutoScalingGroupInput{
|
opts := autoscaling.UpdateAutoScalingGroupInput{
|
||||||
|
@@ -445,8 +462,6 @@ func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{})
 		})
 	}
 
-var waitForASGCapacityTimeout = 10 * time.Minute
-
 // Waits for a minimum number of healthy instances to show up as healthy in the
 // ASG before continuing. Waits up to `waitForASGCapacityTimeout` for
 // "desired_capacity", or "min_size" if desired capacity is not specified.
@@ -461,9 +476,20 @@ func waitForASGCapacity(d *schema.ResourceData, meta interface{}) error {
 	}
 	wantELB := d.Get("min_elb_capacity").(int)
 
-	log.Printf("[DEBUG] Waiting for capacity: %d ASG, %d ELB", wantASG, wantELB)
+	wait, err := time.ParseDuration(d.Get("wait_for_capacity_timeout").(string))
+	if err != nil {
+		return err
+	}
 
-	return resource.Retry(waitForASGCapacityTimeout, func() error {
+	if wait == 0 {
+		log.Printf("[DEBUG] Capacity timeout set to 0, skipping capacity waiting.")
+		return nil
+	}
+
+	log.Printf("[DEBUG] Waiting %s for capacity: %d ASG, %d ELB",
+		wait, wantASG, wantELB)
+
+	return resource.Retry(wait, func() error {
 		g, err := getAwsAutoscalingGroup(d, meta)
 		if err != nil {
 			return resource.RetryError{Err: err}
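The capacity wait now derives its retry window from user input instead of a package-level constant. A rough, self-contained sketch of that polling pattern, independent of Terraform's resource.Retry helper (the function and values here are illustrative):

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryFor polls fn until it succeeds or the window elapses, mirroring the
// shape of resource.Retry with a caller-supplied timeout.
func retryFor(window time.Duration, fn func() error) error {
	deadline := time.Now().Add(window)
	for {
		if err := fn(); err == nil {
			return nil
		} else if time.Now().After(deadline) {
			return fmt.Errorf("timed out after %s: %v", window, err)
		}
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	attempts := 0
	err := retryFor(5*time.Second, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("not ready")
		}
		return nil
	})
	fmt.Println(err, attempts) // <nil> 3
}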
@@ -0,0 +1,175 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/autoscaling"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsAutoscalingLifecycleHook() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsAutoscalingLifecycleHookPut,
+		Read:   resourceAwsAutoscalingLifecycleHookRead,
+		Update: resourceAwsAutoscalingLifecycleHookPut,
+		Delete: resourceAwsAutoscalingLifecycleHookDelete,
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"autoscaling_group_name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+			},
+			"default_result": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"heartbeat_timeout": &schema.Schema{
+				Type:     schema.TypeInt,
+				Optional: true,
+			},
+			"lifecycle_transition": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+			},
+			"notification_metadata": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"notification_target_arn": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+			},
+			"role_arn": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+			},
+		},
+	}
+}
+
+func resourceAwsAutoscalingLifecycleHookPut(d *schema.ResourceData, meta interface{}) error {
+	autoscalingconn := meta.(*AWSClient).autoscalingconn
+
+	params := getAwsAutoscalingPutLifecycleHookInput(d)
+
+	log.Printf("[DEBUG] AutoScaling PutLifecycleHook: %#v", params)
+	_, err := autoscalingconn.PutLifecycleHook(&params)
+	if err != nil {
+		return fmt.Errorf("Error putting lifecycle hook: %s", err)
+	}
+
+	d.SetId(d.Get("name").(string))
+
+	return resourceAwsAutoscalingLifecycleHookRead(d, meta)
+}
+
+func resourceAwsAutoscalingLifecycleHookRead(d *schema.ResourceData, meta interface{}) error {
+	p, err := getAwsAutoscalingLifecycleHook(d, meta)
+	if err != nil {
+		return err
+	}
+	if p == nil {
+		d.SetId("")
+		return nil
+	}
+
+	log.Printf("[DEBUG] Read Lifecycle Hook: ASG: %s, SH: %s, Obj: %#v", d.Get("autoscaling_group_name"), d.Get("name"), p)
+
+	d.Set("default_result", p.DefaultResult)
+	d.Set("heartbeat_timeout", p.HeartbeatTimeout)
+	d.Set("lifecycle_transition", p.LifecycleTransition)
+	d.Set("notification_metadata", p.NotificationMetadata)
+	d.Set("notification_target_arn", p.NotificationTargetARN)
+	d.Set("name", p.LifecycleHookName)
+	d.Set("role_arn", p.RoleARN)
+
+	return nil
+}
+
+func resourceAwsAutoscalingLifecycleHookDelete(d *schema.ResourceData, meta interface{}) error {
+	autoscalingconn := meta.(*AWSClient).autoscalingconn
+	p, err := getAwsAutoscalingLifecycleHook(d, meta)
+	if err != nil {
+		return err
+	}
+	if p == nil {
+		return nil
+	}
+
+	params := autoscaling.DeleteLifecycleHookInput{
+		AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)),
+		LifecycleHookName:    aws.String(d.Get("name").(string)),
+	}
+	if _, err := autoscalingconn.DeleteLifecycleHook(&params); err != nil {
+		return fmt.Errorf("Error deleting Autoscaling Lifecycle Hook: %s", err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func getAwsAutoscalingPutLifecycleHookInput(d *schema.ResourceData) autoscaling.PutLifecycleHookInput {
+	var params = autoscaling.PutLifecycleHookInput{
+		AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)),
+		LifecycleHookName:    aws.String(d.Get("name").(string)),
+	}
+
+	if v, ok := d.GetOk("default_result"); ok {
+		params.DefaultResult = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("heartbeat_timeout"); ok {
+		params.HeartbeatTimeout = aws.Int64(int64(v.(int)))
+	}
+
+	if v, ok := d.GetOk("lifecycle_transition"); ok {
+		params.LifecycleTransition = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("notification_metadata"); ok {
+		params.NotificationMetadata = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("notification_target_arn"); ok {
+		params.NotificationTargetARN = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("role_arn"); ok {
+		params.RoleARN = aws.String(v.(string))
+	}
+
+	return params
+}
+
+func getAwsAutoscalingLifecycleHook(d *schema.ResourceData, meta interface{}) (*autoscaling.LifecycleHook, error) {
+	autoscalingconn := meta.(*AWSClient).autoscalingconn
+
+	params := autoscaling.DescribeLifecycleHooksInput{
+		AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)),
+		LifecycleHookNames:   []*string{aws.String(d.Get("name").(string))},
+	}
+
+	log.Printf("[DEBUG] AutoScaling Lifecycle Hook Describe Params: %#v", params)
+	resp, err := autoscalingconn.DescribeLifecycleHooks(&params)
+	if err != nil {
+		return nil, fmt.Errorf("Error retrieving lifecycle hooks: %s", err)
+	}
+
+	// find lifecycle hooks
+	name := d.Get("name")
+	for idx, sp := range resp.LifecycleHooks {
+		if *sp.LifecycleHookName == name {
+			return resp.LifecycleHooks[idx], nil
+		}
+	}
+
+	// lifecycle hook not found
+	return nil, nil
+}
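One detail worth noting in getAwsAutoscalingLifecycleHook: a missing hook is signalled by (nil, nil) rather than an error, which lets Read clear the ID and lets Delete become a no-op. A minimal standalone sketch of that convention (types here are illustrative):

package main

import "fmt"

type hook struct{ Name string }

// findHook returns (nil, nil) when the hook is absent, reserving errors for
// real API failures — callers treat nil as "gone" rather than "broken".
func findHook(hooks []*hook, name string) (*hook, error) {
	for _, h := range hooks {
		if h.Name == name {
			return h, nil
		}
	}
	return nil, nil // not found is not an error
}

func main() {
	hooks := []*hook{{Name: "scale-in"}}
	h, _ := findHook(hooks, "scale-out")
	fmt.Println(h == nil) // true: resource should be removed from state
}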
@@ -0,0 +1,168 @@
+package aws
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/autoscaling"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSAutoscalingLifecycleHook_basic(t *testing.T) {
+	var hook autoscaling.LifecycleHook
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSAutoscalingLifecycleHookDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccAWSAutoscalingLifecycleHookConfig,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckLifecycleHookExists("aws_autoscaling_lifecycle_hook.foobar", &hook),
+					resource.TestCheckResourceAttr("aws_autoscaling_lifecycle_hook.foobar", "autoscaling_group_name", "terraform-test-foobar5"),
+					resource.TestCheckResourceAttr("aws_autoscaling_lifecycle_hook.foobar", "default_result", "CONTINUE"),
+					resource.TestCheckResourceAttr("aws_autoscaling_lifecycle_hook.foobar", "heartbeat_timeout", "2000"),
+					resource.TestCheckResourceAttr("aws_autoscaling_lifecycle_hook.foobar", "lifecycle_transition", "autoscaling:EC2_INSTANCE_LAUNCHING"),
+				),
+			},
+		},
+	})
+}
+
+func testAccCheckLifecycleHookExists(n string, hook *autoscaling.LifecycleHook) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		conn := testAccProvider.Meta().(*AWSClient).autoscalingconn
+		params := &autoscaling.DescribeLifecycleHooksInput{
+			AutoScalingGroupName: aws.String(rs.Primary.Attributes["autoscaling_group_name"]),
+			LifecycleHookNames:   []*string{aws.String(rs.Primary.ID)},
+		}
+		resp, err := conn.DescribeLifecycleHooks(params)
+		if err != nil {
+			return err
+		}
+		if len(resp.LifecycleHooks) == 0 {
+			return fmt.Errorf("LifecycleHook not found")
+		}
+
+		return nil
+	}
+}
+
+func testAccCheckAWSAutoscalingLifecycleHookDestroy(s *terraform.State) error {
+	conn := testAccProvider.Meta().(*AWSClient).autoscalingconn
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "aws_autoscaling_group" {
+			continue
+		}
+
+		params := autoscaling.DescribeLifecycleHooksInput{
+			AutoScalingGroupName: aws.String(rs.Primary.Attributes["autoscaling_group_name"]),
+			LifecycleHookNames:   []*string{aws.String(rs.Primary.ID)},
+		}
+
+		resp, err := conn.DescribeLifecycleHooks(&params)
+
+		if err == nil {
+			if len(resp.LifecycleHooks) != 0 &&
+				*resp.LifecycleHooks[0].LifecycleHookName == rs.Primary.ID {
+				return fmt.Errorf("Lifecycle Hook Still Exists: %s", rs.Primary.ID)
+			}
+		}
+	}
+
+	return nil
+}
+
+var testAccAWSAutoscalingLifecycleHookConfig = fmt.Sprintf(`
+resource "aws_launch_configuration" "foobar" {
+  name = "terraform-test-foobar5"
+  image_id = "ami-21f78e11"
+  instance_type = "t1.micro"
+}
+
+resource "aws_sqs_queue" "foobar" {
+  name = "foobar"
+  delay_seconds = 90
+  max_message_size = 2048
+  message_retention_seconds = 86400
+  receive_wait_time_seconds = 10
+}
+
+resource "aws_iam_role" "foobar" {
+  name = "foobar"
+  assume_role_policy = <<EOF
+{
+  "Version" : "2012-10-17",
+  "Statement": [ {
+    "Effect": "Allow",
+    "Principal": {"AWS": "*"},
+    "Action": [ "sts:AssumeRole" ]
+  } ]
+}
+EOF
+}
+
+resource "aws_iam_role_policy" "foobar" {
+  name = "foobar"
+  role = "${aws_iam_role.foobar.id}"
+  policy = <<EOF
+{
+  "Version" : "2012-10-17",
+  "Statement": [ {
+    "Effect": "Allow",
+    "Action": [
+      "sqs:SendMessage",
+      "sqs:GetQueueUrl",
+      "sns:Publish"
+    ],
+    "Resource": [
+      "${aws_sqs_queue.foobar.arn}"
+    ]
+  } ]
+}
+EOF
+}
+
+resource "aws_autoscaling_group" "foobar" {
+  availability_zones = ["us-west-2a"]
+  name = "terraform-test-foobar5"
+  max_size = 5
+  min_size = 2
+  health_check_grace_period = 300
+  health_check_type = "ELB"
+  force_delete = true
+  termination_policies = ["OldestInstance"]
+  launch_configuration = "${aws_launch_configuration.foobar.name}"
+  tag {
+    key = "Foo"
+    value = "foo-bar"
+    propagate_at_launch = true
+  }
+}
+
+resource "aws_autoscaling_lifecycle_hook" "foobar" {
+  name = "foobar"
+  autoscaling_group_name = "${aws_autoscaling_group.foobar.name}"
+  default_result = "CONTINUE"
+  heartbeat_timeout = 2000
+  lifecycle_transition = "autoscaling:EC2_INSTANCE_LAUNCHING"
+  notification_metadata = <<EOF
+{
+  "foo": "bar"
+}
+EOF
+  notification_target_arn = "${aws_sqs_queue.foobar.arn}"
+  role_arn = "${aws_iam_role.foobar.arn}"
+}
+`)
@@ -0,0 +1,146 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/terraform/helper/schema"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
+)
+
+func resourceAwsCloudWatchLogGroup() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsCloudWatchLogGroupCreate,
+		Read:   resourceAwsCloudWatchLogGroupRead,
+		Update: resourceAwsCloudWatchLogGroupUpdate,
+		Delete: resourceAwsCloudWatchLogGroupDelete,
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"retention_in_days": &schema.Schema{
+				Type:     schema.TypeInt,
+				Optional: true,
+				Default:  0,
+			},
+
+			"arn": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceAwsCloudWatchLogGroupCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudwatchlogsconn
+
+	log.Printf("[DEBUG] Creating CloudWatch Log Group: %s", d.Get("name").(string))
+	_, err := conn.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{
+		LogGroupName: aws.String(d.Get("name").(string)),
+	})
+	if err != nil {
+		return fmt.Errorf("Creating CloudWatch Log Group failed: %s", err)
+	}
+
+	d.SetId(d.Get("name").(string))
+
+	log.Println("[INFO] CloudWatch Log Group created")
+
+	return resourceAwsCloudWatchLogGroupUpdate(d, meta)
+}
+
+func resourceAwsCloudWatchLogGroupRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudwatchlogsconn
+	log.Printf("[DEBUG] Reading CloudWatch Log Group: %q", d.Get("name").(string))
+	lg, err := lookupCloudWatchLogGroup(conn, d.Get("name").(string), nil)
+	if err != nil {
+		return err
+	}
+
+	log.Printf("[DEBUG] Found Log Group: %#v", *lg)
+
+	d.Set("arn", *lg.Arn)
+	d.Set("name", *lg.LogGroupName)
+
+	if lg.RetentionInDays != nil {
+		d.Set("retention_in_days", *lg.RetentionInDays)
+	}
+
+	return nil
+}
+
+func lookupCloudWatchLogGroup(conn *cloudwatchlogs.CloudWatchLogs,
+	name string, nextToken *string) (*cloudwatchlogs.LogGroup, error) {
+	input := &cloudwatchlogs.DescribeLogGroupsInput{
+		LogGroupNamePrefix: aws.String(name),
+		NextToken:          nextToken,
+	}
+	resp, err := conn.DescribeLogGroups(input)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, lg := range resp.LogGroups {
+		if *lg.LogGroupName == name {
+			return lg, nil
+		}
+	}
+
+	if resp.NextToken != nil {
+		return lookupCloudWatchLogGroup(conn, name, resp.NextToken)
+	}
+
+	return nil, fmt.Errorf("CloudWatch Log Group %q not found", name)
+}
+
+func resourceAwsCloudWatchLogGroupUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudwatchlogsconn
+
+	name := d.Get("name").(string)
+	log.Printf("[DEBUG] Updating CloudWatch Log Group: %q", name)
+
+	if d.HasChange("retention_in_days") {
+		var err error
+
+		if v, ok := d.GetOk("retention_in_days"); ok {
+			input := cloudwatchlogs.PutRetentionPolicyInput{
+				LogGroupName:    aws.String(name),
+				RetentionInDays: aws.Int64(int64(v.(int))),
+			}
+			log.Printf("[DEBUG] Setting retention for CloudWatch Log Group: %q: %s", name, input)
+			_, err = conn.PutRetentionPolicy(&input)
+		} else {
+			log.Printf("[DEBUG] Deleting retention for CloudWatch Log Group: %q", name)
+			_, err = conn.DeleteRetentionPolicy(&cloudwatchlogs.DeleteRetentionPolicyInput{
+				LogGroupName: aws.String(name),
+			})
+		}
+
+		return err
+	}
+
+	return resourceAwsCloudWatchLogGroupRead(d, meta)
+}
+
+func resourceAwsCloudWatchLogGroupDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudwatchlogsconn
+	log.Printf("[INFO] Deleting CloudWatch Log Group: %s", d.Id())
+	_, err := conn.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{
+		LogGroupName: aws.String(d.Get("name").(string)),
+	})
+	if err != nil {
+		return fmt.Errorf("Error deleting CloudWatch Log Group: %s", err)
+	}
+	log.Println("[INFO] CloudWatch Log Group deleted")
+
+	d.SetId("")
+
+	return nil
+}
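The lookup helper pages through DescribeLogGroups by recursing on NextToken until an exact name match appears, since LogGroupNamePrefix can return many near-misses. The same shape in a self-contained sketch (the paged API here is simulated, not the real SDK):

package main

import (
	"errors"
	"fmt"
)

type page struct {
	items []string
	next  *int // index of the next page, nil when exhausted
}

// findExact recurses through pages until an exact match appears, mirroring
// the NextToken-driven lookup above.
func findExact(pages []page, name string, idx int) (string, error) {
	p := pages[idx]
	for _, it := range p.items {
		if it == name {
			return it, nil
		}
	}
	if p.next != nil {
		return findExact(pages, name, *p.next)
	}
	return "", errors.New("not found")
}

func main() {
	one := 1
	pages := []page{{items: []string{"foo-bar-extra"}, next: &one}, {items: []string{"foo-bar"}}}
	got, err := findExact(pages, "foo-bar", 0)
	fmt.Println(got, err) // foo-bar <nil>
}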
@@ -0,0 +1,147 @@
+package aws
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSCloudWatchLogGroup_basic(t *testing.T) {
+	var lg cloudwatchlogs.LogGroup
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccAWSCloudWatchLogGroupConfig,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg),
+					resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "retention_in_days", "0"),
+				),
+			},
+		},
+	})
+}
+
+func TestAccAWSCloudWatchLogGroup_retentionPolicy(t *testing.T) {
+	var lg cloudwatchlogs.LogGroup
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccAWSCloudWatchLogGroupConfig_withRetention,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg),
+					resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "retention_in_days", "365"),
+				),
+			},
+			resource.TestStep{
+				Config: testAccAWSCloudWatchLogGroupConfigModified_withRetention,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg),
+					resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "retention_in_days", "0"),
+				),
+			},
+		},
+	})
+}
+
+func TestAccAWSCloudWatchLogGroup_multiple(t *testing.T) {
+	var lg cloudwatchlogs.LogGroup
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccAWSCloudWatchLogGroupConfig_multiple,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.alpha", &lg),
+					resource.TestCheckResourceAttr("aws_cloudwatch_log_group.alpha", "retention_in_days", "14"),
+					testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.beta", &lg),
+					resource.TestCheckResourceAttr("aws_cloudwatch_log_group.beta", "retention_in_days", "0"),
+					testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.charlie", &lg),
+					resource.TestCheckResourceAttr("aws_cloudwatch_log_group.charlie", "retention_in_days", "3653"),
+				),
+			},
+		},
+	})
+}
+
+func testAccCheckCloudWatchLogGroupExists(n string, lg *cloudwatchlogs.LogGroup) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		conn := testAccProvider.Meta().(*AWSClient).cloudwatchlogsconn
+		logGroup, err := lookupCloudWatchLogGroup(conn, rs.Primary.ID, nil)
+		if err != nil {
+			return err
+		}
+
+		*lg = *logGroup
+
+		return nil
+	}
+}
+
+func testAccCheckAWSCloudWatchLogGroupDestroy(s *terraform.State) error {
+	conn := testAccProvider.Meta().(*AWSClient).cloudwatchlogsconn
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "aws_cloudwatch_log_group" {
+			continue
+		}
+
+		_, err := lookupCloudWatchLogGroup(conn, rs.Primary.ID, nil)
+		if err == nil {
+			return fmt.Errorf("LogGroup Still Exists: %s", rs.Primary.ID)
+		}
+	}
+
+	return nil
+}
+
+var testAccAWSCloudWatchLogGroupConfig = `
+resource "aws_cloudwatch_log_group" "foobar" {
+  name = "foo-bar"
+}
+`
+
+var testAccAWSCloudWatchLogGroupConfig_withRetention = `
+resource "aws_cloudwatch_log_group" "foobar" {
+  name = "foo-bang"
+  retention_in_days = 365
+}
+`
+
+var testAccAWSCloudWatchLogGroupConfigModified_withRetention = `
+resource "aws_cloudwatch_log_group" "foobar" {
+  name = "foo-bang"
+}
+`
+
+var testAccAWSCloudWatchLogGroupConfig_multiple = `
+resource "aws_cloudwatch_log_group" "alpha" {
+  name = "foo-bar"
+  retention_in_days = 14
+}
+resource "aws_cloudwatch_log_group" "beta" {
+  name = "foo-bara"
+}
+resource "aws_cloudwatch_log_group" "charlie" {
+  name = "foo-baraa"
+  retention_in_days = 3653
+}
+`
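These acceptance tests provision real AWS resources, so Terraform's harness only runs them when the environment opts in via TF_ACC (the variable name is the real convention; the rest of this sketch is illustrative):

package main

import (
	"fmt"
	"os"
)

// accEnabled mirrors how acceptance-test harnesses gate expensive,
// real-infrastructure tests behind an environment variable.
func accEnabled() bool {
	return os.Getenv("TF_ACC") != ""
}

func main() {
	if !accEnabled() {
		fmt.Println("skipping: set TF_ACC=1 to run acceptance tests")
		return
	}
	fmt.Println("would provision real AWS resources here")
}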
@@ -78,26 +78,7 @@ func resourceAwsDbInstance() *schema.Resource {
 				Type:     schema.TypeString,
 				Required: true,
 				ForceNew: true,
-				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
-					value := v.(string)
-					if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
-						errors = append(errors, fmt.Errorf(
-							"only lowercase alphanumeric characters and hyphens allowed in %q", k))
-					}
-					if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
-						errors = append(errors, fmt.Errorf(
-							"first character of %q must be a letter", k))
-					}
-					if regexp.MustCompile(`--`).MatchString(value) {
-						errors = append(errors, fmt.Errorf(
-							"%q cannot contain two consecutive hyphens", k))
-					}
-					if regexp.MustCompile(`-$`).MatchString(value) {
-						errors = append(errors, fmt.Errorf(
-							"%q cannot end with a hyphen", k))
-					}
-					return
-				},
+				ValidateFunc: validateRdsId,
 			},
 
 			"instance_class": &schema.Schema{
@@ -524,7 +505,6 @@ func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error {
 		if v.DBName != nil && *v.DBName != "" {
 			name = *v.DBName
 		}
-
 		log.Printf("[DEBUG] Error building ARN for DB Instance, not setting Tags for DB %s", name)
 	} else {
 		resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"fmt"
 	"log"
+	"regexp"
 	"strings"
 	"time"
 
@@ -27,6 +28,7 @@ func resourceAwsDbParameterGroup() *schema.Resource {
 				Type:     schema.TypeString,
 				ForceNew: true,
 				Required: true,
+				ValidateFunc: validateDbParamGroupName,
 			},
 			"family": &schema.Schema{
 				Type:     schema.TypeString,
@@ -227,3 +229,29 @@ func resourceAwsDbParameterHash(v interface{}) int {
 
 	return hashcode.String(buf.String())
 }
+
+func validateDbParamGroupName(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"only lowercase alphanumeric characters and hyphens allowed in %q", k))
+	}
+	if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"first character of %q must be a letter", k))
+	}
+	if regexp.MustCompile(`--`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot contain two consecutive hyphens", k))
+	}
+	if regexp.MustCompile(`-$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot end with a hyphen", k))
+	}
+	if len(value) > 255 {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot be greater than 255 characters", k))
+	}
+	return
+}
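Each rule in validateDbParamGroupName is an independent regexp check, so one input can trip several at once. A standalone illustration of the four patterns against sample names (the samples are illustrative):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	charset := regexp.MustCompile(`^[0-9a-z-]+$`) // must match
	leading := regexp.MustCompile(`^[a-z]`)       // must match
	doubled := regexp.MustCompile(`--`)           // must NOT match
	trailing := regexp.MustCompile(`-$`)          // must NOT match

	for _, name := range []string{"pg-test", "1pg", "pg--test", "pg-"} {
		fmt.Printf("%-10q charset=%t leading=%t doubled=%t trailing=%t\n",
			name, charset.MatchString(name), leading.MatchString(name),
			doubled.MatchString(name), trailing.MatchString(name))
	}
}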
@@ -2,7 +2,9 @@ package aws
 
 import (
 	"fmt"
+	"math/rand"
 	"testing"
+	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
@@ -106,6 +108,46 @@ func TestAccAWSDBParameterGroupOnly(t *testing.T) {
 	})
 }
 
+func TestResourceAWSDBParameterGroupName_validation(t *testing.T) {
+	cases := []struct {
+		Value    string
+		ErrCount int
+	}{
+		{
+			Value:    "tEsting123",
+			ErrCount: 1,
+		},
+		{
+			Value:    "testing123!",
+			ErrCount: 1,
+		},
+		{
+			Value:    "1testing123",
+			ErrCount: 1,
+		},
+		{
+			Value:    "testing--123",
+			ErrCount: 1,
+		},
+		{
+			Value:    "testing123-",
+			ErrCount: 1,
+		},
+		{
+			Value:    randomString(256),
+			ErrCount: 1,
+		},
+	}
+
+	for _, tc := range cases {
+		_, errors := validateDbParamGroupName(tc.Value, "aws_db_parameter_group_name")
+
+		if len(errors) != tc.ErrCount {
+			t.Fatalf("Expected the DB Parameter Group Name to trigger a validation error")
+		}
+	}
+}
+
 func testAccCheckAWSDBParameterGroupDestroy(s *terraform.State) error {
 	conn := testAccProvider.Meta().(*AWSClient).rdsconn
 
@@ -193,6 +235,16 @@ func testAccCheckAWSDBParameterGroupExists(n string, v *rds.DBParameterGroup) re
 	}
 }
 
+func randomString(strlen int) string {
+	rand.Seed(time.Now().UTC().UnixNano())
+	const chars = "abcdefghijklmnopqrstuvwxyz"
+	result := make([]byte, strlen)
+	for i := 0; i < strlen; i++ {
+		result[i] = chars[rand.Intn(len(chars))]
+	}
+	return string(result)
+}
+
 const testAccAWSDBParameterGroupConfig = `
 resource "aws_db_parameter_group" "bar" {
 	name = "parameter-group-test-terraform"
@@ -9,8 +9,8 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/service/rds"
+	"github.com/hashicorp/go-multierror"
 	"github.com/hashicorp/terraform/helper/hashcode"
-	"github.com/hashicorp/terraform/helper/multierror"
 	"github.com/hashicorp/terraform/helper/resource"
 	"github.com/hashicorp/terraform/helper/schema"
 )
@@ -9,6 +9,7 @@ import (
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/iam"
 	"github.com/aws/aws-sdk-go/service/rds"
 	"github.com/hashicorp/terraform/helper/resource"
 	"github.com/hashicorp/terraform/helper/schema"
@@ -56,12 +57,15 @@ func resourceAwsDbSubnetGroup() *schema.Resource {
 				Elem:     &schema.Schema{Type: schema.TypeString},
 				Set:      schema.HashString,
 			},
 
+			"tags": tagsSchema(),
 		},
 	}
 }
 
 func resourceAwsDbSubnetGroupCreate(d *schema.ResourceData, meta interface{}) error {
 	rdsconn := meta.(*AWSClient).rdsconn
+	tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))
 
 	subnetIdsSet := d.Get("subnet_ids").(*schema.Set)
 	subnetIds := make([]*string, subnetIdsSet.Len())
@@ -73,6 +77,7 @@ func resourceAwsDbSubnetGroupCreate(d *schema.ResourceData, meta interface{}) er
 		DBSubnetGroupName:        aws.String(d.Get("name").(string)),
 		DBSubnetGroupDescription: aws.String(d.Get("description").(string)),
 		SubnetIds:                subnetIds,
+		Tags:                     tags,
 	}
 
 	log.Printf("[DEBUG] Create DB Subnet Group: %#v", createOpts)
@@ -130,6 +135,28 @@ func resourceAwsDbSubnetGroupRead(d *schema.ResourceData, meta interface{}) erro
 	}
 	d.Set("subnet_ids", subnets)
 
+	// list tags for resource
+	// set tags
+	conn := meta.(*AWSClient).rdsconn
+	arn, err := buildRDSsubgrpARN(d, meta)
+	if err != nil {
+		log.Printf("[DEBUG] Error building ARN for DB Subnet Group, not setting Tags for group %s", *subnetGroup.DBSubnetGroupName)
+	} else {
+		resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{
+			ResourceName: aws.String(arn),
+		})
+
+		if err != nil {
+			log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
+		} else {
+			var dt []*rds.Tag
+			if len(resp.TagList) > 0 {
+				dt = resp.TagList
+			}
+			d.Set("tags", tagsToMapRDS(dt))
+		}
+	}
+
 	return nil
 }
 
@@ -156,6 +183,15 @@ func resourceAwsDbSubnetGroupUpdate(d *schema.ResourceData, meta interface{}) er
 			return err
 		}
 	}
+
+	if arn, err := buildRDSsubgrpARN(d, meta); err == nil {
+		if err := setTagsRDS(conn, d, arn); err != nil {
+			return err
+		} else {
+			d.SetPartial("tags")
+		}
+	}
+
 	return resourceAwsDbSubnetGroupRead(d, meta)
 }
 
@@ -196,3 +232,17 @@ func resourceAwsDbSubnetGroupDeleteRefreshFunc(
 		return d, "destroyed", nil
 	}
 }
+
+func buildRDSsubgrpARN(d *schema.ResourceData, meta interface{}) (string, error) {
+	iamconn := meta.(*AWSClient).iamconn
+	region := meta.(*AWSClient).region
+	// A zero value GetUserInput{} defers to the currently logged in user
+	resp, err := iamconn.GetUser(&iam.GetUserInput{})
+	if err != nil {
+		return "", err
+	}
+	userARN := *resp.User.Arn
+	accountID := strings.Split(userARN, ":")[4]
+	arn := fmt.Sprintf("arn:aws:rds:%s:%s:subgrp:%s", region, accountID, d.Id())
+	return arn, nil
+}

@@ -150,6 +150,9 @@ resource "aws_db_subnet_group" "foo" {
 	name = "FOO"
 	description = "foo description"
 	subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"]
+	tags {
+		Name = "tf-dbsubnet-group-test"
+	}
 }
 `
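buildRDSsubgrpARN works because the account ID is the fifth colon-separated field of any ARN, so it can be lifted from the calling user's own ARN. A standalone demonstration (the ARN value is illustrative):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Account ID is field index 4 of arn:partition:service:region:account:resource.
	userARN := "arn:aws:iam::123456789012:user/terraform" // illustrative value
	accountID := strings.Split(userARN, ":")[4]
	arn := fmt.Sprintf("arn:aws:rds:%s:%s:subgrp:%s", "us-west-2", accountID, "main")
	fmt.Println(arn) // arn:aws:rds:us-west-2:123456789012:subgrp:main
}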
@@ -0,0 +1,291 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/hashicorp/terraform/helper/schema"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/directoryservice"
+	"github.com/hashicorp/terraform/helper/resource"
+)
+
+func resourceAwsDirectoryServiceDirectory() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsDirectoryServiceDirectoryCreate,
+		Read:   resourceAwsDirectoryServiceDirectoryRead,
+		Update: resourceAwsDirectoryServiceDirectoryUpdate,
+		Delete: resourceAwsDirectoryServiceDirectoryDelete,
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"password": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"size": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"alias": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+				ForceNew: true,
+			},
+			"description": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+			"short_name": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+				ForceNew: true,
+			},
+			"vpc_settings": &schema.Schema{
+				Type:     schema.TypeList,
+				Required: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"subnet_ids": &schema.Schema{
+							Type:     schema.TypeSet,
+							Required: true,
+							ForceNew: true,
+							Elem:     &schema.Schema{Type: schema.TypeString},
+							Set:      schema.HashString,
+						},
+						"vpc_id": &schema.Schema{
+							Type:     schema.TypeString,
+							Required: true,
+							ForceNew: true,
+						},
+					},
+				},
+			},
+			"enable_sso": &schema.Schema{
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  false,
+			},
+			"access_url": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"dns_ip_addresses": &schema.Schema{
+				Type:     schema.TypeSet,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set:      schema.HashString,
+				Computed: true,
+			},
+			"type": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceAwsDirectoryServiceDirectoryCreate(d *schema.ResourceData, meta interface{}) error {
+	dsconn := meta.(*AWSClient).dsconn
+
+	input := directoryservice.CreateDirectoryInput{
+		Name:     aws.String(d.Get("name").(string)),
+		Password: aws.String(d.Get("password").(string)),
+		Size:     aws.String(d.Get("size").(string)),
+	}
+
+	if v, ok := d.GetOk("description"); ok {
+		input.Description = aws.String(v.(string))
+	}
+	if v, ok := d.GetOk("short_name"); ok {
+		input.ShortName = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("vpc_settings"); ok {
+		settings := v.([]interface{})
+
+		if len(settings) > 1 {
+			return fmt.Errorf("Only a single vpc_settings block is expected")
+		} else if len(settings) == 1 {
+			s := settings[0].(map[string]interface{})
+			var subnetIds []*string
+			for _, id := range s["subnet_ids"].(*schema.Set).List() {
+				subnetIds = append(subnetIds, aws.String(id.(string)))
+			}
+
+			vpcSettings := directoryservice.DirectoryVpcSettings{
+				SubnetIds: subnetIds,
+				VpcId:     aws.String(s["vpc_id"].(string)),
+			}
+			input.VpcSettings = &vpcSettings
+		}
+	}
+
+	log.Printf("[DEBUG] Creating Directory Service: %s", input)
+	out, err := dsconn.CreateDirectory(&input)
+	if err != nil {
+		return err
+	}
+	log.Printf("[DEBUG] Directory Service created: %s", out)
+	d.SetId(*out.DirectoryId)
+
+	// Wait for creation
+	log.Printf("[DEBUG] Waiting for DS (%q) to become available", d.Id())
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"Requested", "Creating", "Created"},
+		Target:  "Active",
+		Refresh: func() (interface{}, string, error) {
+			resp, err := dsconn.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{
+				DirectoryIds: []*string{aws.String(d.Id())},
+			})
+			if err != nil {
+				log.Printf("Error during creation of DS: %q", err.Error())
+				return nil, "", err
+			}
+
+			ds := resp.DirectoryDescriptions[0]
+			log.Printf("[DEBUG] Creation of DS %q is in following stage: %q.",
+				d.Id(), *ds.Stage)
+			return ds, *ds.Stage, nil
+		},
+		Timeout: 10 * time.Minute,
+	}
+	if _, err := stateConf.WaitForState(); err != nil {
+		return fmt.Errorf(
+			"Error waiting for Directory Service (%s) to become available: %#v",
+			d.Id(), err)
+	}
+
+	if v, ok := d.GetOk("alias"); ok {
+		d.SetPartial("alias")
+
+		input := directoryservice.CreateAliasInput{
+			DirectoryId: aws.String(d.Id()),
+			Alias:       aws.String(v.(string)),
+		}
+
+		log.Printf("[DEBUG] Assigning alias %q to DS directory %q",
+			v.(string), d.Id())
+		out, err := dsconn.CreateAlias(&input)
+		if err != nil {
+			return err
+		}
+		log.Printf("[DEBUG] Alias %q assigned to DS directory %q",
+			*out.Alias, *out.DirectoryId)
+	}
+
+	return resourceAwsDirectoryServiceDirectoryUpdate(d, meta)
+}
+
+func resourceAwsDirectoryServiceDirectoryUpdate(d *schema.ResourceData, meta interface{}) error {
+	dsconn := meta.(*AWSClient).dsconn
+
+	if d.HasChange("enable_sso") {
+		d.SetPartial("enable_sso")
+		var err error
+
+		if v, ok := d.GetOk("enable_sso"); ok && v.(bool) {
+			log.Printf("[DEBUG] Enabling SSO for DS directory %q", d.Id())
+			_, err = dsconn.EnableSso(&directoryservice.EnableSsoInput{
+				DirectoryId: aws.String(d.Id()),
+			})
+		} else {
+			log.Printf("[DEBUG] Disabling SSO for DS directory %q", d.Id())
+			_, err = dsconn.DisableSso(&directoryservice.DisableSsoInput{
+				DirectoryId: aws.String(d.Id()),
+			})
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+
+	return resourceAwsDirectoryServiceDirectoryRead(d, meta)
+}
+
+func resourceAwsDirectoryServiceDirectoryRead(d *schema.ResourceData, meta interface{}) error {
+	dsconn := meta.(*AWSClient).dsconn
+
+	input := directoryservice.DescribeDirectoriesInput{
+		DirectoryIds: []*string{aws.String(d.Id())},
+	}
+	out, err := dsconn.DescribeDirectories(&input)
+	if err != nil {
+		return err
+	}
+
+	dir := out.DirectoryDescriptions[0]
+	log.Printf("[DEBUG] Received DS directory: %s", *dir)
+
+	d.Set("access_url", *dir.AccessUrl)
+	d.Set("alias", *dir.Alias)
+	if dir.Description != nil {
+		d.Set("description", *dir.Description)
+	}
+	d.Set("dns_ip_addresses", schema.NewSet(schema.HashString, flattenStringList(dir.DnsIpAddrs)))
+	d.Set("name", *dir.Name)
+	if dir.ShortName != nil {
+		d.Set("short_name", *dir.ShortName)
+	}
+	d.Set("size", *dir.Size)
+	d.Set("type", *dir.Type)
+	d.Set("vpc_settings", flattenDSVpcSettings(dir.VpcSettings))
+	d.Set("enable_sso", *dir.SsoEnabled)
+
+	return nil
+}
+
+func resourceAwsDirectoryServiceDirectoryDelete(d *schema.ResourceData, meta interface{}) error {
+	dsconn := meta.(*AWSClient).dsconn
+
+	input := directoryservice.DeleteDirectoryInput{
+		DirectoryId: aws.String(d.Id()),
+	}
+	_, err := dsconn.DeleteDirectory(&input)
+	if err != nil {
+		return err
+	}
+
+	// Wait for deletion
+	log.Printf("[DEBUG] Waiting for DS (%q) to be deleted", d.Id())
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"Deleting"},
+		Target:  "",
+		Refresh: func() (interface{}, string, error) {
+			resp, err := dsconn.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{
+				DirectoryIds: []*string{aws.String(d.Id())},
+			})
+			if err != nil {
+				return nil, "", err
+			}
+
+			if len(resp.DirectoryDescriptions) == 0 {
+				return nil, "", nil
+			}
+
+			ds := resp.DirectoryDescriptions[0]
+			log.Printf("[DEBUG] Deletion of DS %q is in following stage: %q.",
+				d.Id(), *ds.Stage)
+			return ds, *ds.Stage, nil
+		},
+		Timeout: 10 * time.Minute,
+	}
+	if _, err := stateConf.WaitForState(); err != nil {
+		return fmt.Errorf(
+			"Error waiting for Directory Service (%s) to be deleted: %q",
+			d.Id(), err.Error())
+	}
+
+	return nil
+}
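Both create and delete block on a StateChangeConf that polls DescribeDirectories until the directory reaches (or leaves) a stage. A self-contained sketch of that polling contract, with the stage progression simulated rather than fetched from AWS:

package main

import (
	"fmt"
	"time"
)

// waitForState polls refresh until it reports target or the timeout elapses,
// the same contract resource.StateChangeConf provides.
func waitForState(target string, timeout time.Duration, refresh func() string) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if state := refresh(); state == target {
			return nil
		}
		time.Sleep(10 * time.Millisecond)
	}
	return fmt.Errorf("timed out waiting for state %q", target)
}

func main() {
	stages := []string{"Requested", "Creating", "Created", "Active"}
	i := 0
	err := waitForState("Active", time.Second, func() string {
		s := stages[i]
		if i < len(stages)-1 {
			i++
		}
		return s
	})
	fmt.Println(err) // <nil>
}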
@ -0,0 +1,283 @@
|
||||||
|
package aws
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/service/directoryservice"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccAWSDirectoryServiceDirectory_basic(t *testing.T) {
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckDirectoryServiceDirectoryDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccDirectoryServiceDirectoryConfig,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccAWSDirectoryServiceDirectory_withAliasAndSso(t *testing.T) {
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckDirectoryServiceDirectoryDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccDirectoryServiceDirectoryConfig_withAlias,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar_a"),
|
||||||
|
testAccCheckServiceDirectoryAlias("aws_directory_service_directory.bar_a",
|
||||||
|
fmt.Sprintf("tf-d-%d", randomInteger)),
|
||||||
|
testAccCheckServiceDirectorySso("aws_directory_service_directory.bar_a", false),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccDirectoryServiceDirectoryConfig_withSso,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar_a"),
|
||||||
|
testAccCheckServiceDirectoryAlias("aws_directory_service_directory.bar_a",
|
||||||
|
fmt.Sprintf("tf-d-%d", randomInteger)),
|
||||||
|
testAccCheckServiceDirectorySso("aws_directory_service_directory.bar_a", true),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccDirectoryServiceDirectoryConfig_withSso_modified,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar_a"),
|
||||||
|
testAccCheckServiceDirectoryAlias("aws_directory_service_directory.bar_a",
|
||||||
|
fmt.Sprintf("tf-d-%d", randomInteger)),
|
||||||
|
testAccCheckServiceDirectorySso("aws_directory_service_directory.bar_a", false),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckDirectoryServiceDirectoryDestroy(s *terraform.State) error {
|
||||||
|
if len(s.RootModule().Resources) > 0 {
|
||||||
|
return fmt.Errorf("Expected all resources to be gone, but found: %#v",
|
||||||
|
s.RootModule().Resources)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckServiceDirectoryExists(name string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[name]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return fmt.Errorf("No ID is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
dsconn := testAccProvider.Meta().(*AWSClient).dsconn
|
||||||
|
out, err := dsconn.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{
|
||||||
|
DirectoryIds: []*string{aws.String(rs.Primary.ID)},
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(out.DirectoryDescriptions) < 1 {
|
||||||
|
return fmt.Errorf("No DS directory found")
|
||||||
|
}
|
||||||
|
|
||||||
|
if *out.DirectoryDescriptions[0].DirectoryId != rs.Primary.ID {
|
||||||
|
return fmt.Errorf("DS directory ID mismatch - existing: %q, state: %q",
|
||||||
|
*out.DirectoryDescriptions[0].DirectoryId, rs.Primary.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckServiceDirectoryAlias(name, alias string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[name]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return fmt.Errorf("No ID is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
dsconn := testAccProvider.Meta().(*AWSClient).dsconn
|
||||||
|
out, err := dsconn.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{
|
||||||
|
DirectoryIds: []*string{aws.String(rs.Primary.ID)},
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if *out.DirectoryDescriptions[0].Alias != alias {
|
||||||
|
return fmt.Errorf("DS directory Alias mismatch - actual: %q, expected: %q",
|
||||||
|
*out.DirectoryDescriptions[0].Alias, alias)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckServiceDirectorySso(name string, ssoEnabled bool) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[name]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return fmt.Errorf("No ID is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
dsconn := testAccProvider.Meta().(*AWSClient).dsconn
|
||||||
|
out, err := dsconn.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{
|
||||||
|
DirectoryIds: []*string{aws.String(rs.Primary.ID)},
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if *out.DirectoryDescriptions[0].SsoEnabled != ssoEnabled {
|
||||||
|
return fmt.Errorf("DS directory SSO mismatch - actual: %t, expected: %t",
|
||||||
|
*out.DirectoryDescriptions[0].SsoEnabled, ssoEnabled)
|
||||||
|
}
|
		return nil
	}
}

const testAccDirectoryServiceDirectoryConfig = `
resource "aws_directory_service_directory" "bar" {
  name = "corp.notexample.com"
  password = "SuperSecretPassw0rd"
  size = "Small"

  vpc_settings {
    vpc_id = "${aws_vpc.main.id}"
    subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"]
  }
}

resource "aws_vpc" "main" {
  cidr_block = "10.0.0.0/16"
}

resource "aws_subnet" "foo" {
  vpc_id = "${aws_vpc.main.id}"
  availability_zone = "us-west-2a"
  cidr_block = "10.0.1.0/24"
}

resource "aws_subnet" "bar" {
  vpc_id = "${aws_vpc.main.id}"
  availability_zone = "us-west-2b"
  cidr_block = "10.0.2.0/24"
}
`

var randomInteger = genRandInt()

var testAccDirectoryServiceDirectoryConfig_withAlias = fmt.Sprintf(`
resource "aws_directory_service_directory" "bar_a" {
  name = "corp.notexample.com"
  password = "SuperSecretPassw0rd"
  size = "Small"
  alias = "tf-d-%d"

  vpc_settings {
    vpc_id = "${aws_vpc.main.id}"
    subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"]
  }
}

resource "aws_vpc" "main" {
  cidr_block = "10.0.0.0/16"
}

resource "aws_subnet" "foo" {
  vpc_id = "${aws_vpc.main.id}"
  availability_zone = "us-west-2a"
  cidr_block = "10.0.1.0/24"
}

resource "aws_subnet" "bar" {
  vpc_id = "${aws_vpc.main.id}"
  availability_zone = "us-west-2b"
  cidr_block = "10.0.2.0/24"
}
`, randomInteger)

var testAccDirectoryServiceDirectoryConfig_withSso = fmt.Sprintf(`
resource "aws_directory_service_directory" "bar_a" {
  name = "corp.notexample.com"
  password = "SuperSecretPassw0rd"
  size = "Small"
  alias = "tf-d-%d"
  enable_sso = true

  vpc_settings {
    vpc_id = "${aws_vpc.main.id}"
    subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"]
  }
}

resource "aws_vpc" "main" {
  cidr_block = "10.0.0.0/16"
}

resource "aws_subnet" "foo" {
  vpc_id = "${aws_vpc.main.id}"
  availability_zone = "us-west-2a"
  cidr_block = "10.0.1.0/24"
}

resource "aws_subnet" "bar" {
  vpc_id = "${aws_vpc.main.id}"
  availability_zone = "us-west-2b"
  cidr_block = "10.0.2.0/24"
}
`, randomInteger)

var testAccDirectoryServiceDirectoryConfig_withSso_modified = fmt.Sprintf(`
resource "aws_directory_service_directory" "bar_a" {
  name = "corp.notexample.com"
  password = "SuperSecretPassw0rd"
  size = "Small"
  alias = "tf-d-%d"
  enable_sso = false

  vpc_settings {
    vpc_id = "${aws_vpc.main.id}"
    subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"]
  }
}

resource "aws_vpc" "main" {
  cidr_block = "10.0.0.0/16"
}

resource "aws_subnet" "foo" {
  vpc_id = "${aws_vpc.main.id}"
  availability_zone = "us-west-2a"
  cidr_block = "10.0.1.0/24"
}

resource "aws_subnet" "bar" {
  vpc_id = "${aws_vpc.main.id}"
  availability_zone = "us-west-2b"
  cidr_block = "10.0.2.0/24"
}
`, randomInteger)
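The configs above splice randomInteger into the directory alias so that repeated acceptance-test runs do not collide on a globally unique alias. The real genRandInt helper lives elsewhere in this package; this is a plausible sketch only, not the committed code:

package aws

import (
	"math/rand"
	"time"
)

// Plausible sketch only; the real genRandInt is defined elsewhere in the package.
func genRandInt() int {
	rand.Seed(time.Now().UTC().UnixNano()) // reseed so consecutive runs differ
	return rand.Int() % 1000               // a short suffix keeps the alias readable
}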
@@ -287,7 +287,11 @@ func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) er
 	} else {
 		// No error, set ID and return
 		d.SetId(*output.TableDescription.TableName)
-		return nil
+		if err := d.Set("arn", *output.TableDescription.TableArn); err != nil {
+			return err
+		}
+
+		return resourceAwsDynamoDbTableRead(d, meta)
 	}
 }
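The hunk above swaps a bare return nil for the usual provider convention: record the identifying attributes, then delegate to Read so the remaining state is populated from the live API. A minimal sketch of that convention; createRemoteObject and resourceExampleRead are hypothetical stand-ins:

func resourceExampleCreate(d *schema.ResourceData, meta interface{}) error {
	id, err := createRemoteObject(meta) // hypothetical API call
	if err != nil {
		return err
	}
	// Record identity first, so even a failed Read leaves the object tracked.
	d.SetId(id)
	// Delegate to Read so every remaining attribute comes from the API
	// response rather than from a half-populated local state.
	return resourceExampleRead(d, meta)
}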
@@ -384,7 +388,7 @@ func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) er
 			updates = append(updates, update)

 			// Hash key is required, range key isn't
-			hashkey_type, err := getAttributeType(d, *(gsi.KeySchema[0].AttributeName))
+			hashkey_type, err := getAttributeType(d, *gsi.KeySchema[0].AttributeName)
 			if err != nil {
 				return err
 			}

@@ -396,7 +400,7 @@ func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) er

 			// If there's a range key, there will be 2 elements in KeySchema
 			if len(gsi.KeySchema) == 2 {
-				rangekey_type, err := getAttributeType(d, *(gsi.KeySchema[1].AttributeName))
+				rangekey_type, err := getAttributeType(d, *gsi.KeySchema[1].AttributeName)
 				if err != nil {
 					return err
 				}

@@ -480,8 +484,8 @@ func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) er

 			capacityUpdated := false

-			if int64(gsiReadCapacity) != *(gsi.ProvisionedThroughput.ReadCapacityUnits) ||
-				int64(gsiWriteCapacity) != *(gsi.ProvisionedThroughput.WriteCapacityUnits) {
+			if int64(gsiReadCapacity) != *gsi.ProvisionedThroughput.ReadCapacityUnits ||
+				int64(gsiWriteCapacity) != *gsi.ProvisionedThroughput.WriteCapacityUnits {
 				capacityUpdated = true
 			}

@@ -544,8 +548,8 @@ func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) erro
 	attributes := []interface{}{}
 	for _, attrdef := range table.AttributeDefinitions {
 		attribute := map[string]string{
-			"name": *(attrdef.AttributeName),
-			"type": *(attrdef.AttributeType),
+			"name": *attrdef.AttributeName,
+			"type": *attrdef.AttributeType,
 		}
 		attributes = append(attributes, attribute)
 		log.Printf("[DEBUG] Added Attribute: %s", attribute["name"])

@@ -556,9 +560,9 @@ func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) erro
 	gsiList := make([]map[string]interface{}, 0, len(table.GlobalSecondaryIndexes))
 	for _, gsiObject := range table.GlobalSecondaryIndexes {
 		gsi := map[string]interface{}{
-			"write_capacity": *(gsiObject.ProvisionedThroughput.WriteCapacityUnits),
-			"read_capacity":  *(gsiObject.ProvisionedThroughput.ReadCapacityUnits),
-			"name":           *(gsiObject.IndexName),
+			"write_capacity": *gsiObject.ProvisionedThroughput.WriteCapacityUnits,
+			"read_capacity":  *gsiObject.ProvisionedThroughput.ReadCapacityUnits,
+			"name":           *gsiObject.IndexName,
 		}

 		for _, attribute := range gsiObject.KeySchema {

@@ -571,7 +575,7 @@ func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) erro
 			}
 		}

-		gsi["projection_type"] = *(gsiObject.Projection.ProjectionType)
+		gsi["projection_type"] = *gsiObject.Projection.ProjectionType
 		gsi["non_key_attributes"] = gsiObject.Projection.NonKeyAttributes

 		gsiList = append(gsiList, gsi)

@@ -647,7 +651,7 @@ func createGSIFromData(data *map[string]interface{}) dynamodb.GlobalSecondaryInd

 func getGlobalSecondaryIndex(indexName string, indexList []*dynamodb.GlobalSecondaryIndexDescription) (*dynamodb.GlobalSecondaryIndexDescription, error) {
 	for _, gsi := range indexList {
-		if *(gsi.IndexName) == indexName {
+		if *gsi.IndexName == indexName {
 			return gsi, nil
 		}
 	}

@@ -726,7 +730,7 @@ func waitForTableToBeActive(tableName string, meta interface{}) error {
 			return err
 		}

-		activeState = *(result.Table.TableStatus) == "ACTIVE"
+		activeState = *result.Table.TableStatus == "ACTIVE"

 		// Wait for a few seconds
 		if !activeState {
@@ -211,7 +211,7 @@ func dynamoDbAttributesToMap(attributes *[]*dynamodb.AttributeDefinition) map[st
 	attrmap := make(map[string]string)

 	for _, attrdef := range *attributes {
-		attrmap[*(attrdef.AttributeName)] = *(attrdef.AttributeType)
+		attrmap[*attrdef.AttributeName] = *attrdef.AttributeType
 	}

 	return attrmap
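Every hunk above makes the same mechanical change: dropping redundant parentheses around dereferences of selected fields. In Go the selector (.) binds tighter than the unary *, so both spellings parse identically; a self-contained demonstration:

package main

import "fmt"

type table struct{ status *string }

func main() {
	s := "ACTIVE"
	t := table{status: &s}
	// . binds tighter than unary *, so both expressions dereference
	// t.status; the parentheses add nothing.
	fmt.Println(*(t.status)) // ACTIVE
	fmt.Println(*t.status)   // ACTIVE
}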
@@ -0,0 +1,165 @@
package aws

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/efs"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsEfsFileSystem() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsEfsFileSystemCreate,
		Read:   resourceAwsEfsFileSystemRead,
		Update: resourceAwsEfsFileSystemUpdate,
		Delete: resourceAwsEfsFileSystemDelete,

		Schema: map[string]*schema.Schema{
			"reference_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"tags": tagsSchema(),
		},
	}
}

func resourceAwsEfsFileSystemCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).efsconn

	referenceName := ""
	if v, ok := d.GetOk("reference_name"); ok {
		referenceName = v.(string) + "-"
	}
	token := referenceName + resource.UniqueId()
	fs, err := conn.CreateFileSystem(&efs.CreateFileSystemInput{
		CreationToken: aws.String(token),
	})
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Creating EFS file system: %s", *fs)
	d.SetId(*fs.FileSystemId)

	stateConf := &resource.StateChangeConf{
		Pending: []string{"creating"},
		Target:  "available",
		Refresh: func() (interface{}, string, error) {
			resp, err := conn.DescribeFileSystems(&efs.DescribeFileSystemsInput{
				FileSystemId: aws.String(d.Id()),
			})
			if err != nil {
				return nil, "error", err
			}

			if len(resp.FileSystems) < 1 {
				return nil, "not-found", fmt.Errorf("EFS file system %q not found", d.Id())
			}

			fs := resp.FileSystems[0]
			log.Printf("[DEBUG] current status of %q: %q", *fs.FileSystemId, *fs.LifeCycleState)
			return fs, *fs.LifeCycleState, nil
		},
		Timeout:    10 * time.Minute,
		Delay:      2 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	_, err = stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf("Error waiting for EFS file system (%q) to create: %q",
			d.Id(), err.Error())
	}
	log.Printf("[DEBUG] EFS file system created: %q", *fs.FileSystemId)

	return resourceAwsEfsFileSystemUpdate(d, meta)
}

func resourceAwsEfsFileSystemUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).efsconn
	err := setTagsEFS(conn, d)
	if err != nil {
		return err
	}

	return resourceAwsEfsFileSystemRead(d, meta)
}

func resourceAwsEfsFileSystemRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).efsconn

	resp, err := conn.DescribeFileSystems(&efs.DescribeFileSystemsInput{
		FileSystemId: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}
	if len(resp.FileSystems) < 1 {
		return fmt.Errorf("EFS file system %q not found", d.Id())
	}

	tagsResp, err := conn.DescribeTags(&efs.DescribeTagsInput{
		FileSystemId: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}

	d.Set("tags", tagsToMapEFS(tagsResp.Tags))

	return nil
}

func resourceAwsEfsFileSystemDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).efsconn

	log.Printf("[DEBUG] Deleting EFS file system %s", d.Id())
	_, err := conn.DeleteFileSystem(&efs.DeleteFileSystemInput{
		FileSystemId: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}

	stateConf := &resource.StateChangeConf{
		Pending: []string{"available", "deleting"},
		Target:  "",
		Refresh: func() (interface{}, string, error) {
			resp, err := conn.DescribeFileSystems(&efs.DescribeFileSystemsInput{
				FileSystemId: aws.String(d.Id()),
			})
			if err != nil {
				efsErr, ok := err.(awserr.Error)
				if ok && efsErr.Code() == "FileSystemNotFound" {
					return nil, "", nil
				}
				return nil, "error", err
			}

			if len(resp.FileSystems) < 1 {
				return nil, "", nil
			}

			fs := resp.FileSystems[0]
			log.Printf("[DEBUG] current status of %q: %q",
				*fs.FileSystemId, *fs.LifeCycleState)
			return fs, *fs.LifeCycleState, nil
		},
		Timeout:    10 * time.Minute,
		Delay:      2 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	_, err = stateConf.WaitForState()
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] EFS file system %q deleted.", d.Id())

	return nil
}
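Both the create and delete paths in this file lean on the helper/resource waiting contract. A minimal sketch of that contract, not the exact provider code; checkThing is a hypothetical stand-in for a DescribeX API call:

package example

import (
	"time"

	"github.com/hashicorp/terraform/helper/resource"
)

func waitUntilAvailable(checkThing func() (interface{}, string, error)) error {
	stateConf := &resource.StateChangeConf{
		Pending: []string{"creating"}, // states we expect to pass through
		Target:  "available",         // state that ends the wait successfully
		Refresh: func() (interface{}, string, error) {
			// Returning an empty state string signals "object is gone",
			// which is how the delete paths above detect completion.
			return checkThing()
		},
		Timeout:    10 * time.Minute,
		Delay:      2 * time.Second,
		MinTimeout: 3 * time.Second,
	}
	_, err := stateConf.WaitForState()
	return err
}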
@@ -0,0 +1,133 @@
package aws

import (
	"fmt"
	"reflect"
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/efs"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

func TestAccAWSEFSFileSystem(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckEfsFileSystemDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSEFSFileSystemConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckEfsFileSystem(
						"aws_efs_file_system.foo",
					),
				),
			},
			resource.TestStep{
				Config: testAccAWSEFSFileSystemConfigWithTags,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckEfsFileSystem(
						"aws_efs_file_system.foo-with-tags",
					),
					testAccCheckEfsFileSystemTags(
						"aws_efs_file_system.foo-with-tags",
						map[string]string{
							"Name":    "foo-efs",
							"Another": "tag",
						},
					),
				),
			},
		},
	})
}

func testAccCheckEfsFileSystemDestroy(s *terraform.State) error {
	if len(s.RootModule().Resources) > 0 {
		return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
	}

	return nil
}

func testAccCheckEfsFileSystem(resourceID string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[resourceID]
		if !ok {
			return fmt.Errorf("Not found: %s", resourceID)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		conn := testAccProvider.Meta().(*AWSClient).efsconn
		_, err := conn.DescribeFileSystems(&efs.DescribeFileSystemsInput{
			FileSystemId: aws.String(rs.Primary.ID),
		})
		if err != nil {
			return err
		}

		return nil
	}
}

func testAccCheckEfsFileSystemTags(resourceID string, expectedTags map[string]string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[resourceID]
		if !ok {
			return fmt.Errorf("Not found: %s", resourceID)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		conn := testAccProvider.Meta().(*AWSClient).efsconn
		resp, err := conn.DescribeTags(&efs.DescribeTagsInput{
			FileSystemId: aws.String(rs.Primary.ID),
		})
		if err != nil {
			return err
		}

		if !reflect.DeepEqual(expectedTags, tagsToMapEFS(resp.Tags)) {
			return fmt.Errorf("Tags mismatch.\nExpected: %#v\nGiven: %#v",
				expectedTags, resp.Tags)
		}

		return nil
	}
}

const testAccAWSEFSFileSystemConfig = `
resource "aws_efs_file_system" "foo" {
  reference_name = "radeksimko"
}
`

const testAccAWSEFSFileSystemConfigWithTags = `
resource "aws_efs_file_system" "foo-with-tags" {
  reference_name = "yada_yada"
  tags {
    Name = "foo-efs"
    Another = "tag"
  }
}
`
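The tags check above compares maps with reflect.DeepEqual, since Go maps are not comparable with ==. A small standalone demonstration:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	expected := map[string]string{"Name": "foo-efs", "Another": "tag"}
	actual := map[string]string{"Another": "tag", "Name": "foo-efs"}
	// Key order is irrelevant; DeepEqual compares contents.
	fmt.Println(reflect.DeepEqual(expected, actual)) // true
}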
@@ -0,0 +1,223 @@
package aws

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/efs"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsEfsMountTarget() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsEfsMountTargetCreate,
		Read:   resourceAwsEfsMountTargetRead,
		Update: resourceAwsEfsMountTargetUpdate,
		Delete: resourceAwsEfsMountTargetDelete,

		Schema: map[string]*schema.Schema{
			"file_system_id": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"ip_address": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
				Optional: true,
				ForceNew: true,
			},

			"security_groups": &schema.Schema{
				Type:     schema.TypeSet,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set:      schema.HashString,
				Computed: true,
				Optional: true,
			},

			"subnet_id": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"network_interface_id": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}

func resourceAwsEfsMountTargetCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).efsconn

	input := efs.CreateMountTargetInput{
		FileSystemId: aws.String(d.Get("file_system_id").(string)),
		SubnetId:     aws.String(d.Get("subnet_id").(string)),
	}

	if v, ok := d.GetOk("ip_address"); ok {
		input.IpAddress = aws.String(v.(string))
	}
	if v, ok := d.GetOk("security_groups"); ok {
		input.SecurityGroups = expandStringList(v.(*schema.Set).List())
	}

	log.Printf("[DEBUG] Creating EFS mount target: %#v", input)

	mt, err := conn.CreateMountTarget(&input)
	if err != nil {
		return err
	}

	d.SetId(*mt.MountTargetId)

	stateConf := &resource.StateChangeConf{
		Pending: []string{"creating"},
		Target:  "available",
		Refresh: func() (interface{}, string, error) {
			resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{
				MountTargetId: aws.String(d.Id()),
			})
			if err != nil {
				return nil, "error", err
			}

			if len(resp.MountTargets) < 1 {
				return nil, "error", fmt.Errorf("EFS mount target %q not found", d.Id())
			}

			mt := resp.MountTargets[0]

			log.Printf("[DEBUG] Current status of %q: %q", *mt.MountTargetId, *mt.LifeCycleState)
			return mt, *mt.LifeCycleState, nil
		},
		Timeout:    10 * time.Minute,
		Delay:      2 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	_, err = stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf("Error waiting for EFS mount target (%s) to create: %s", d.Id(), err)
	}

	log.Printf("[DEBUG] EFS mount target created: %s", *mt.MountTargetId)

	return resourceAwsEfsMountTargetRead(d, meta)
}

func resourceAwsEfsMountTargetUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).efsconn

	if d.HasChange("security_groups") {
		input := efs.ModifyMountTargetSecurityGroupsInput{
			MountTargetId:  aws.String(d.Id()),
			SecurityGroups: expandStringList(d.Get("security_groups").(*schema.Set).List()),
		}
		_, err := conn.ModifyMountTargetSecurityGroups(&input)
		if err != nil {
			return err
		}
	}

	return resourceAwsEfsMountTargetRead(d, meta)
}

func resourceAwsEfsMountTargetRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).efsconn
	resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{
		MountTargetId: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}

	if len(resp.MountTargets) < 1 {
		return fmt.Errorf("EFS mount target %q not found", d.Id())
	}

	mt := resp.MountTargets[0]

	log.Printf("[DEBUG] Found EFS mount target: %#v", mt)

	d.SetId(*mt.MountTargetId)
	d.Set("file_system_id", *mt.FileSystemId)
	d.Set("ip_address", *mt.IpAddress)
	d.Set("subnet_id", *mt.SubnetId)
	d.Set("network_interface_id", *mt.NetworkInterfaceId)

	sgResp, err := conn.DescribeMountTargetSecurityGroups(&efs.DescribeMountTargetSecurityGroupsInput{
		MountTargetId: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}

	d.Set("security_groups", schema.NewSet(schema.HashString, flattenStringList(sgResp.SecurityGroups)))

	return nil
}

func resourceAwsEfsMountTargetDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).efsconn

	log.Printf("[DEBUG] Deleting EFS mount target %q", d.Id())
	_, err := conn.DeleteMountTarget(&efs.DeleteMountTargetInput{
		MountTargetId: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}

	stateConf := &resource.StateChangeConf{
		Pending: []string{"available", "deleting", "deleted"},
		Target:  "",
		Refresh: func() (interface{}, string, error) {
			resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{
				MountTargetId: aws.String(d.Id()),
			})
			if err != nil {
				awsErr, ok := err.(awserr.Error)
				if !ok {
					return nil, "error", err
				}

				if awsErr.Code() == "MountTargetNotFound" {
					return nil, "", nil
				}

				return nil, "error", awsErr
			}

			if len(resp.MountTargets) < 1 {
				return nil, "", nil
			}

			mt := resp.MountTargets[0]

			log.Printf("[DEBUG] Current status of %q: %q", *mt.MountTargetId, *mt.LifeCycleState)
			return mt, *mt.LifeCycleState, nil
		},
		Timeout:    10 * time.Minute,
		Delay:      2 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	_, err = stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf("Error waiting for EFS mount target (%q) to delete: %q",
			d.Id(), err.Error())
	}

	log.Printf("[DEBUG] EFS mount target %q deleted.", d.Id())

	return nil
}
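The mount-target resource converts Terraform sets into the AWS SDK's []*string via expandStringList. The real helper lives in the provider's shared structure helpers; this sketch of its shape is an assumption:

func expandStringList(configured []interface{}) []*string {
	vs := make([]*string, 0, len(configured))
	for _, v := range configured {
		// Each set element arrives as interface{}; assert and box it
		// into the *string the AWS SDK expects.
		vs = append(vs, aws.String(v.(string)))
	}
	return vs
}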
@@ -0,0 +1,135 @@
package aws

import (
	"fmt"
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/efs"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

func TestAccAWSEFSMountTarget(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckEfsMountTargetDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSEFSMountTargetConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckEfsMountTarget(
						"aws_efs_mount_target.alpha",
					),
				),
			},
			resource.TestStep{
				Config: testAccAWSEFSMountTargetConfigModified,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckEfsMountTarget(
						"aws_efs_mount_target.alpha",
					),
					testAccCheckEfsMountTarget(
						"aws_efs_mount_target.beta",
					),
				),
			},
		},
	})
}

func testAccCheckEfsMountTargetDestroy(s *terraform.State) error {
	if len(s.RootModule().Resources) > 0 {
		return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
	}

	return nil
}

func testAccCheckEfsMountTarget(resourceID string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[resourceID]
		if !ok {
			return fmt.Errorf("Not found: %s", resourceID)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		conn := testAccProvider.Meta().(*AWSClient).efsconn
		mt, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{
			MountTargetId: aws.String(rs.Primary.ID),
		})
		if err != nil {
			return err
		}

		if *mt.MountTargets[0].MountTargetId != rs.Primary.ID {
			return fmt.Errorf("Mount target ID mismatch: %q != %q",
				*mt.MountTargets[0].MountTargetId, rs.Primary.ID)
		}

		return nil
	}
}

const testAccAWSEFSMountTargetConfig = `
resource "aws_efs_file_system" "foo" {
  reference_name = "radeksimko"
}

resource "aws_efs_mount_target" "alpha" {
  file_system_id = "${aws_efs_file_system.foo.id}"
  subnet_id = "${aws_subnet.alpha.id}"
}

resource "aws_vpc" "foo" {
  cidr_block = "10.0.0.0/16"
}

resource "aws_subnet" "alpha" {
  vpc_id = "${aws_vpc.foo.id}"
  availability_zone = "us-west-2a"
  cidr_block = "10.0.1.0/24"
}
`

const testAccAWSEFSMountTargetConfigModified = `
resource "aws_efs_file_system" "foo" {
  reference_name = "radeksimko"
}

resource "aws_efs_mount_target" "alpha" {
  file_system_id = "${aws_efs_file_system.foo.id}"
  subnet_id = "${aws_subnet.alpha.id}"
}

resource "aws_efs_mount_target" "beta" {
  file_system_id = "${aws_efs_file_system.foo.id}"
  subnet_id = "${aws_subnet.beta.id}"
}

resource "aws_vpc" "foo" {
  cidr_block = "10.0.0.0/16"
}

resource "aws_subnet" "alpha" {
  vpc_id = "${aws_vpc.foo.id}"
  availability_zone = "us-west-2a"
  cidr_block = "10.0.1.0/24"
}

resource "aws_subnet" "beta" {
  vpc_id = "${aws_vpc.foo.id}"
  availability_zone = "us-west-2b"
  cidr_block = "10.0.2.0/24"
}
`
@@ -30,13 +30,13 @@ func resourceAwsEip() *schema.Resource {
 			"instance": &schema.Schema{
 				Type:     schema.TypeString,
 				Optional: true,
+				Computed: true,
 			},

 			"network_interface": &schema.Schema{
 				Type:     schema.TypeString,
 				Optional: true,
 				Computed: true,
-				ConflictsWith: []string{"instance"},
 			},

 			"allocation_id": &schema.Schema{

@@ -134,7 +134,7 @@ func resourceAwsEipRead(d *schema.ResourceData, meta interface{}) error {

 	// Verify AWS returned our EIP
 	if len(describeAddresses.Addresses) != 1 ||
-		(domain == "vpc" && *describeAddresses.Addresses[0].AllocationId != id) ||
+		domain == "vpc" && *describeAddresses.Addresses[0].AllocationId != id ||
 		*describeAddresses.Addresses[0].PublicIp != id {
 		if err != nil {
 			return fmt.Errorf("Unable to find EIP: %#v", describeAddresses.Addresses)
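Dropping the parentheses in the EIP read is safe for the same precedence reason as the pointer hunks earlier: && binds tighter than ||, so the grouping is unchanged. A standalone check:

package main

import "fmt"

func main() {
	lenOK, isVPC, allocMatches, ipMatches := false, true, false, true
	withParens := !lenOK || (isVPC && !allocMatches) || !ipMatches
	withoutParens := !lenOK || isVPC && !allocMatches || !ipMatches
	// && binds tighter than ||, so both expressions group identically.
	fmt.Println(withParens == withoutParens) // true, for any inputs
}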
@@ -28,6 +28,12 @@ func resourceAwsElasticacheCluster() *schema.Resource {
 				Type:     schema.TypeString,
 				Required: true,
 				ForceNew: true,
+				StateFunc: func(val interface{}) string {
+					// Elasticache normalizes cluster ids to lowercase,
+					// so we have to do this too or else we can end up
+					// with non-converging diffs.
+					return strings.ToLower(val.(string))
+				},
 			},
 			"configuration_endpoint": &schema.Schema{
 				Type: schema.TypeString,

@@ -194,7 +200,11 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{
 		return fmt.Errorf("Error creating Elasticache: %s", err)
 	}

-	d.SetId(*resp.CacheCluster.CacheClusterId)
+	// Assign the cluster id as the resource ID
+	// Elasticache always retains the id in lower case, so we have to
+	// mimic that or else we won't be able to refresh a resource whose
+	// name contained uppercase characters.
+	d.SetId(strings.ToLower(*resp.CacheCluster.CacheClusterId))

 	pending := []string{"creating"}
 	stateConf := &resource.StateChangeConf{
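The two elasticache hunks above normalize case on both sides of a plan: the StateFunc lowercases what the user configured, and Create lowercases the id the API echoes back. A toy illustration of why both sides must agree:

package main

import (
	"fmt"
	"strings"
)

func main() {
	configured := "tf-TEST-001"
	fromAPI := "tf-test-001" // Elasticache stores ids in lowercase
	// Lowercasing the configured value makes it match what the API
	// will return, so the plan converges instead of diffing forever.
	fmt.Println(strings.ToLower(configured) == fromAPI) // true
}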
@@ -163,7 +163,10 @@ resource "aws_security_group" "bar" {
 }

 resource "aws_elasticache_cluster" "bar" {
-    cluster_id = "tf-test-%03d"
+    // Including uppercase letters in this name to ensure
+    // that we correctly handle the fact that the API
+    // normalizes names to lowercase.
+    cluster_id = "tf-TEST-%03d"
     node_type = "cache.m1.small"
     num_cache_nodes = 1
     engine = "redis"
@@ -0,0 +1,399 @@
package aws

import (
	"fmt"
	"log"
	"regexp"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsElasticSearchDomain() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsElasticSearchDomainCreate,
		Read:   resourceAwsElasticSearchDomainRead,
		Update: resourceAwsElasticSearchDomainUpdate,
		Delete: resourceAwsElasticSearchDomainDelete,

		Schema: map[string]*schema.Schema{
			"access_policies": &schema.Schema{
				Type:      schema.TypeString,
				StateFunc: normalizeJson,
				Optional:  true,
			},
			"advanced_options": &schema.Schema{
				Type:     schema.TypeMap,
				Optional: true,
				Computed: true,
			},
			"domain_name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
					value := v.(string)
					if !regexp.MustCompile(`^[0-9A-Za-z]+`).MatchString(value) {
						errors = append(errors, fmt.Errorf(
							"%q must start with a letter or number", k))
					}
					if !regexp.MustCompile(`^[0-9A-Za-z][0-9a-z-]+$`).MatchString(value) {
						errors = append(errors, fmt.Errorf(
							"%q can only contain lowercase characters, numbers and hyphens", k))
					}
					return
				},
			},
			"arn": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"domain_id": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"endpoint": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"ebs_options": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Computed: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"ebs_enabled": &schema.Schema{
							Type:     schema.TypeBool,
							Required: true,
						},
						"iops": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
						},
						"volume_size": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
						},
						"volume_type": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},
					},
				},
			},
			"cluster_config": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Computed: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"dedicated_master_count": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
						},
						"dedicated_master_enabled": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
							Default:  false,
						},
						"dedicated_master_type": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},
						"instance_count": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
							Default:  1,
						},
						"instance_type": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Default:  "m3.medium.elasticsearch",
						},
						"zone_awareness_enabled": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
						},
					},
				},
			},
			"snapshot_options": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"automated_snapshot_start_hour": &schema.Schema{
							Type:     schema.TypeInt,
							Required: true,
						},
					},
				},
			},
		},
	}
}

func resourceAwsElasticSearchDomainCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).esconn

	input := elasticsearch.CreateElasticsearchDomainInput{
		DomainName: aws.String(d.Get("domain_name").(string)),
	}

	if v, ok := d.GetOk("access_policies"); ok {
		input.AccessPolicies = aws.String(v.(string))
	}

	if v, ok := d.GetOk("advanced_options"); ok {
		input.AdvancedOptions = stringMapToPointers(v.(map[string]interface{}))
	}

	if v, ok := d.GetOk("ebs_options"); ok {
		options := v.([]interface{})

		if len(options) > 1 {
			return fmt.Errorf("Only a single ebs_options block is expected")
		} else if len(options) == 1 {
			if options[0] == nil {
				return fmt.Errorf("At least one field is expected inside ebs_options")
			}

			s := options[0].(map[string]interface{})
			input.EBSOptions = expandESEBSOptions(s)
		}
	}

	if v, ok := d.GetOk("cluster_config"); ok {
		config := v.([]interface{})

		if len(config) > 1 {
			return fmt.Errorf("Only a single cluster_config block is expected")
		} else if len(config) == 1 {
			if config[0] == nil {
				return fmt.Errorf("At least one field is expected inside cluster_config")
			}
			m := config[0].(map[string]interface{})
			input.ElasticsearchClusterConfig = expandESClusterConfig(m)
		}
	}

	if v, ok := d.GetOk("snapshot_options"); ok {
		options := v.([]interface{})

		if len(options) > 1 {
			return fmt.Errorf("Only a single snapshot_options block is expected")
		} else if len(options) == 1 {
			if options[0] == nil {
				return fmt.Errorf("At least one field is expected inside snapshot_options")
			}

			o := options[0].(map[string]interface{})

			snapshotOptions := elasticsearch.SnapshotOptions{
				AutomatedSnapshotStartHour: aws.Int64(int64(o["automated_snapshot_start_hour"].(int))),
			}

			input.SnapshotOptions = &snapshotOptions
		}
	}

	log.Printf("[DEBUG] Creating ElasticSearch domain: %s", input)
	out, err := conn.CreateElasticsearchDomain(&input)
	if err != nil {
		return err
	}

	d.SetId(*out.DomainStatus.ARN)

	log.Printf("[DEBUG] Waiting for ElasticSearch domain %q to be created", d.Id())
	err = resource.Retry(15*time.Minute, func() error {
		out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{
			DomainName: aws.String(d.Get("domain_name").(string)),
		})
		if err != nil {
			return resource.RetryError{Err: err}
		}

		if !*out.DomainStatus.Processing && out.DomainStatus.Endpoint != nil {
			return nil
		}

		return fmt.Errorf("%q: Timeout while waiting for the domain to be created", d.Id())
	})
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] ElasticSearch domain %q created", d.Id())

	return resourceAwsElasticSearchDomainRead(d, meta)
}

func resourceAwsElasticSearchDomainRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).esconn

	out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{
		DomainName: aws.String(d.Get("domain_name").(string)),
	})
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Received ElasticSearch domain: %s", out)

	ds := out.DomainStatus

	d.Set("access_policies", *ds.AccessPolicies)
	err = d.Set("advanced_options", pointersMapToStringList(ds.AdvancedOptions))
	if err != nil {
		return err
	}
	d.Set("domain_id", *ds.DomainId)
	d.Set("domain_name", *ds.DomainName)
	if ds.Endpoint != nil {
		d.Set("endpoint", *ds.Endpoint)
	}

	err = d.Set("ebs_options", flattenESEBSOptions(ds.EBSOptions))
	if err != nil {
		return err
	}
	err = d.Set("cluster_config", flattenESClusterConfig(ds.ElasticsearchClusterConfig))
	if err != nil {
		return err
	}
	if ds.SnapshotOptions != nil {
		d.Set("snapshot_options", map[string]interface{}{
			"automated_snapshot_start_hour": *ds.SnapshotOptions.AutomatedSnapshotStartHour,
		})
	}

	d.Set("arn", *ds.ARN)

	return nil
}

func resourceAwsElasticSearchDomainUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).esconn

	input := elasticsearch.UpdateElasticsearchDomainConfigInput{
		DomainName: aws.String(d.Get("domain_name").(string)),
	}

	if d.HasChange("access_policies") {
		input.AccessPolicies = aws.String(d.Get("access_policies").(string))
	}

	if d.HasChange("advanced_options") {
		input.AdvancedOptions = stringMapToPointers(d.Get("advanced_options").(map[string]interface{}))
	}

	if d.HasChange("ebs_options") {
		options := d.Get("ebs_options").([]interface{})

		if len(options) > 1 {
			return fmt.Errorf("Only a single ebs_options block is expected")
		} else if len(options) == 1 {
			s := options[0].(map[string]interface{})
			input.EBSOptions = expandESEBSOptions(s)
		}
	}

	if d.HasChange("cluster_config") {
		config := d.Get("cluster_config").([]interface{})

		if len(config) > 1 {
			return fmt.Errorf("Only a single cluster_config block is expected")
		} else if len(config) == 1 {
			m := config[0].(map[string]interface{})
			input.ElasticsearchClusterConfig = expandESClusterConfig(m)
		}
	}

	if d.HasChange("snapshot_options") {
		options := d.Get("snapshot_options").([]interface{})

		if len(options) > 1 {
			return fmt.Errorf("Only a single snapshot_options block is expected")
		} else if len(options) == 1 {
			o := options[0].(map[string]interface{})

			snapshotOptions := elasticsearch.SnapshotOptions{
				AutomatedSnapshotStartHour: aws.Int64(int64(o["automated_snapshot_start_hour"].(int))),
			}

			input.SnapshotOptions = &snapshotOptions
		}
	}

	_, err := conn.UpdateElasticsearchDomainConfig(&input)
	if err != nil {
		return err
	}

	err = resource.Retry(25*time.Minute, func() error {
		out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{
			DomainName: aws.String(d.Get("domain_name").(string)),
		})
		if err != nil {
			return resource.RetryError{Err: err}
		}

		if !*out.DomainStatus.Processing {
			return nil
		}

		return fmt.Errorf("%q: Timeout while waiting for changes to be processed", d.Id())
	})
	if err != nil {
		return err
	}

	return resourceAwsElasticSearchDomainRead(d, meta)
}

func resourceAwsElasticSearchDomainDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).esconn

	log.Printf("[DEBUG] Deleting ElasticSearch domain: %q", d.Get("domain_name").(string))
	_, err := conn.DeleteElasticsearchDomain(&elasticsearch.DeleteElasticsearchDomainInput{
		DomainName: aws.String(d.Get("domain_name").(string)),
	})
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Waiting for ElasticSearch domain %q to be deleted", d.Get("domain_name").(string))
	err = resource.Retry(15*time.Minute, func() error {
		out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{
			DomainName: aws.String(d.Get("domain_name").(string)),
		})

		if err != nil {
			awsErr, ok := err.(awserr.Error)
			if !ok {
				return resource.RetryError{Err: err}
			}

			if awsErr.Code() == "ResourceNotFoundException" {
				return nil
			}

			return resource.RetryError{Err: awsErr}
		}

		if !*out.DomainStatus.Processing {
			return nil
		}

		return fmt.Errorf("%q: Timeout while waiting for the domain to be deleted", d.Id())
	})

	d.SetId("")

	return err
}
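access_policies runs through the normalizeJson StateFunc so that whitespace-only differences in a policy document never surface as diffs. The real helper is defined elsewhere in the provider; this sketch of it is an assumption about its shape:

func normalizeJson(jsonString interface{}) string {
	s, _ := jsonString.(string)
	if s == "" {
		return ""
	}
	var j interface{}
	if err := json.Unmarshal([]byte(s), &j); err != nil {
		return fmt.Sprintf("Error parsing JSON: %s", err)
	}
	// Re-marshalling produces a canonical, whitespace-free encoding.
	b, _ := json.Marshal(j)
	return string(b)
}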
@@ -0,0 +1,122 @@
package aws

import (
	"fmt"
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

func TestAccAWSElasticSearchDomain_basic(t *testing.T) {
	var domain elasticsearch.ElasticsearchDomainStatus

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckESDomainDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccESDomainConfig_basic,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckESDomainExists("aws_elasticsearch_domain.example", &domain),
				),
			},
		},
	})
}

func TestAccAWSElasticSearchDomain_complex(t *testing.T) {
	var domain elasticsearch.ElasticsearchDomainStatus

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckESDomainDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccESDomainConfig_complex,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckESDomainExists("aws_elasticsearch_domain.example", &domain),
				),
			},
		},
	})
}

func testAccCheckESDomainExists(n string, domain *elasticsearch.ElasticsearchDomainStatus) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ES Domain ID is set")
		}

		conn := testAccProvider.Meta().(*AWSClient).esconn
		opts := &elasticsearch.DescribeElasticsearchDomainInput{
			DomainName: aws.String(rs.Primary.Attributes["domain_name"]),
		}

		resp, err := conn.DescribeElasticsearchDomain(opts)
		if err != nil {
			return fmt.Errorf("Error describing domain: %s", err.Error())
		}

		*domain = *resp.DomainStatus

		return nil
	}
}

func testAccCheckESDomainDestroy(s *terraform.State) error {
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_elasticsearch_domain" {
			continue
		}

		conn := testAccProvider.Meta().(*AWSClient).esconn
		opts := &elasticsearch.DescribeElasticsearchDomainInput{
			DomainName: aws.String(rs.Primary.Attributes["domain_name"]),
		}

		_, err := conn.DescribeElasticsearchDomain(opts)
		if err != nil {
			return fmt.Errorf("Error describing ES domains: %q", err.Error())
		}
	}
	return nil
}

const testAccESDomainConfig_basic = `
resource "aws_elasticsearch_domain" "example" {
  domain_name = "tf-test-1"
}
`

const testAccESDomainConfig_complex = `
resource "aws_elasticsearch_domain" "example" {
  domain_name = "tf-test-2"

  advanced_options {
    "indices.fielddata.cache.size" = 80
  }

  ebs_options {
    ebs_enabled = false
  }

  cluster_config {
    instance_count = 2
    zone_awareness_enabled = true
  }

  snapshot_options {
    automated_snapshot_start_hour = 23
  }
}
`
@@ -28,27 +28,7 @@ func resourceAwsElb() *schema.Resource {
 				Optional: true,
 				Computed: true,
 				ForceNew: true,
-				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
-					value := v.(string)
-					if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) {
-						errors = append(errors, fmt.Errorf(
-							"only alphanumeric characters and hyphens allowed in %q: %q",
-							k, value))
-					}
-					if len(value) > 32 {
-						errors = append(errors, fmt.Errorf(
-							"%q cannot be longer than 32 characters: %q", k, value))
-					}
-					if regexp.MustCompile(`^-`).MatchString(value) {
-						errors = append(errors, fmt.Errorf(
-							"%q cannot begin with a hyphen: %q", k, value))
-					}
-					if regexp.MustCompile(`-$`).MatchString(value) {
-						errors = append(errors, fmt.Errorf(
-							"%q cannot end with a hyphen: %q", k, value))
-					}
-					return
-				},
+				ValidateFunc: validateElbName,
 			},

 			"internal": &schema.Schema{

@@ -591,3 +571,26 @@ func isLoadBalancerNotFound(err error) bool {
 	elberr, ok := err.(awserr.Error)
 	return ok && elberr.Code() == "LoadBalancerNotFound"
 }
+
+func validateElbName(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"only alphanumeric characters and hyphens allowed in %q: %q",
+			k, value))
+	}
+	if len(value) > 32 {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot be longer than 32 characters: %q", k, value))
+	}
+	if regexp.MustCompile(`^-`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot begin with a hyphen: %q", k, value))
+	}
+	if regexp.MustCompile(`-$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot end with a hyphen: %q", k, value))
+	}
+	return
+}
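Note that validateElbName accumulates every violation rather than returning at the first one, so a single bad name can yield several errors. A usage sketch from within the same package:

// "-bad-name-" passes the character check but trips both hyphen rules.
_, errs := validateElbName("-bad-name-", "name")
for _, e := range errs {
	fmt.Println(e) // cannot begin with a hyphen; cannot end with a hyphen
}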
@@ -431,12 +431,48 @@ func TestResourceAwsElbListenerHash(t *testing.T) {
 	for tn, tc := range cases {
 		leftHash := resourceAwsElbListenerHash(tc.Left)
 		rightHash := resourceAwsElbListenerHash(tc.Right)
-		if (leftHash == rightHash) != tc.Match {
+		if leftHash == rightHash != tc.Match {
 			t.Fatalf("%s: expected match: %t, but did not get it", tn, tc.Match)
 		}
 	}
 }
+
+func TestResourceAWSELB_validateElbNameCannotBeginWithHyphen(t *testing.T) {
+	var elbName = "-Testing123"
+	_, errors := validateElbName(elbName, "SampleKey")
+
+	if len(errors) != 1 {
+		t.Fatalf("Expected the ELB Name to trigger a validation error")
+	}
+}
+
+func TestResourceAWSELB_validateElbNameCannotBeLongerThan32Characters(t *testing.T) {
+	var elbName = "Testing123dddddddddddddddddddvvvv"
+	_, errors := validateElbName(elbName, "SampleKey")
+
+	if len(errors) != 1 {
+		t.Fatalf("Expected the ELB Name to trigger a validation error")
+	}
+}
+
+func TestResourceAWSELB_validateElbNameCannotHaveSpecialCharacters(t *testing.T) {
+	var elbName = "Testing123%%"
+	_, errors := validateElbName(elbName, "SampleKey")
+
+	if len(errors) != 1 {
+		t.Fatalf("Expected the ELB Name to trigger a validation error")
+	}
+}
+
+func TestResourceAWSELB_validateElbNameCannotEndWithHyphen(t *testing.T) {
+	var elbName = "Testing123-"
+	_, errors := validateElbName(elbName, "SampleKey")
+
+	if len(errors) != 1 {
+		t.Fatalf("Expected the ELB Name to trigger a validation error")
+	}
+}
+
 func testAccCheckAWSELBDestroy(s *terraform.State) error {
 	conn := testAccProvider.Meta().(*AWSClient).elbconn
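The four validation tests added above are near-identical; a table-driven sketch of the same coverage, shown only as an alternative shape:

func TestResourceAWSELB_validateElbName_invalid(t *testing.T) {
	for _, name := range []string{
		"-Testing123",                       // begins with a hyphen
		"Testing123dddddddddddddddddddvvvv", // 33 characters, over the limit
		"Testing123%%",                      // disallowed characters
		"Testing123-",                       // ends with a hyphen
	} {
		if _, errors := validateElbName(name, "SampleKey"); len(errors) != 1 {
			t.Fatalf("%q: expected exactly one validation error, got %d", name, len(errors))
		}
	}
}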
@@ -0,0 +1,387 @@
package aws

import (
	"fmt"
	"log"
	"regexp"

	"github.com/hashicorp/terraform/helper/schema"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/glacier"
)

func resourceAwsGlacierVault() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsGlacierVaultCreate,
		Read:   resourceAwsGlacierVaultRead,
		Update: resourceAwsGlacierVaultUpdate,
		Delete: resourceAwsGlacierVaultDelete,

		Schema: map[string]*schema.Schema{
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
					value := v.(string)
					if !regexp.MustCompile(`^[.0-9A-Za-z-_]+$`).MatchString(value) {
						errors = append(errors, fmt.Errorf(
							"only alphanumeric characters, hyphens, underscores, and periods allowed in %q", k))
					}
					if len(value) > 255 {
						errors = append(errors, fmt.Errorf(
							"%q cannot be longer than 255 characters", k))
					}
					return
				},
			},

			"location": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"arn": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"access_policy": &schema.Schema{
				Type:      schema.TypeString,
				Optional:  true,
				StateFunc: normalizeJson,
			},

			"notification": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"events": &schema.Schema{
							Type:     schema.TypeSet,
							Required: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
							Set:      schema.HashString,
						},
						"sns_topic": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
					},
				},
			},

			"tags": tagsSchema(),
		},
	}
}

func resourceAwsGlacierVaultCreate(d *schema.ResourceData, meta interface{}) error {
	glacierconn := meta.(*AWSClient).glacierconn

	input := &glacier.CreateVaultInput{
		VaultName: aws.String(d.Get("name").(string)),
	}

	out, err := glacierconn.CreateVault(input)
	if err != nil {
		return fmt.Errorf("Error creating Glacier Vault: %s", err)
	}

	d.SetId(d.Get("name").(string))
	d.Set("location", *out.Location)

	return resourceAwsGlacierVaultUpdate(d, meta)
}

func resourceAwsGlacierVaultUpdate(d *schema.ResourceData, meta interface{}) error {
	glacierconn := meta.(*AWSClient).glacierconn

	if err := setGlacierVaultTags(glacierconn, d); err != nil {
		return err
	}

	if d.HasChange("access_policy") {
		if err := resourceAwsGlacierVaultPolicyUpdate(glacierconn, d); err != nil {
			return err
		}
	}

	if d.HasChange("notification") {
		if err := resourceAwsGlacierVaultNotificationUpdate(glacierconn, d); err != nil {
			return err
		}
	}

	return resourceAwsGlacierVaultRead(d, meta)
}

func resourceAwsGlacierVaultRead(d *schema.ResourceData, meta interface{}) error {
	glacierconn := meta.(*AWSClient).glacierconn

	input := &glacier.DescribeVaultInput{
		VaultName: aws.String(d.Id()),
	}

	out, err := glacierconn.DescribeVault(input)
	if err != nil {
		return fmt.Errorf("Error reading Glacier Vault: %s", err.Error())
	}

	d.Set("arn", *out.VaultARN)

	tags, err := getGlacierVaultTags(glacierconn, d.Id())
	if err != nil {
		return err
	}
	d.Set("tags", tags)

	log.Printf("[DEBUG] Getting the access_policy for Vault %s", d.Id())
	pol, err := glacierconn.GetVaultAccessPolicy(&glacier.GetVaultAccessPolicyInput{
		VaultName: aws.String(d.Id()),
	})

	if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" {
		d.Set("access_policy", "")
	} else if pol != nil {
		d.Set("access_policy", normalizeJson(*pol.Policy.Policy))
	} else {
		return err
	}

	notifications, err := getGlacierVaultNotification(glacierconn, d.Id())
	if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" {
		d.Set("notification", "")
	} else if pol != nil {
|
||||||
|
d.Set("notification", notifications)
|
||||||
|
} else {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceAwsGlacierVaultDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
glacierconn := meta.(*AWSClient).glacierconn
|
||||||
|
|
||||||
|
log.Printf("[DEBUG] Glacier Delete Vault: %s", d.Id())
|
||||||
|
_, err := glacierconn.DeleteVault(&glacier.DeleteVaultInput{
|
||||||
|
VaultName: aws.String(d.Id()),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error deleting Glacier Vault: %s", err.Error())
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceAwsGlacierVaultNotificationUpdate(glacierconn *glacier.Glacier, d *schema.ResourceData) error {
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("notification"); ok {
|
||||||
|
settings := v.([]interface{})
|
||||||
|
|
||||||
|
if len(settings) > 1 {
|
||||||
|
return fmt.Errorf("Only a single Notification Block is allowed for Glacier Vault")
|
||||||
|
} else if len(settings) == 1 {
|
||||||
|
s := settings[0].(map[string]interface{})
|
||||||
|
var events []*string
|
||||||
|
for _, id := range s["events"].(*schema.Set).List() {
|
||||||
|
events = append(events, aws.String(id.(string)))
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := glacierconn.SetVaultNotifications(&glacier.SetVaultNotificationsInput{
|
||||||
|
VaultName: aws.String(d.Id()),
|
||||||
|
VaultNotificationConfig: &glacier.VaultNotificationConfig{
|
||||||
|
SNSTopic: aws.String(s["sns_topic"].(string)),
|
||||||
|
Events: events,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error Updating Glacier Vault Notifications: %s", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
_, err := glacierconn.DeleteVaultNotifications(&glacier.DeleteVaultNotificationsInput{
|
||||||
|
VaultName: aws.String(d.Id()),
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error Removing Glacier Vault Notifications: %s", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceAwsGlacierVaultPolicyUpdate(glacierconn *glacier.Glacier, d *schema.ResourceData) error {
|
||||||
|
vaultName := d.Id()
|
||||||
|
policyContents := d.Get("access_policy").(string)
|
||||||
|
|
||||||
|
policy := &glacier.VaultAccessPolicy{
|
||||||
|
Policy: aws.String(policyContents),
|
||||||
|
}
|
||||||
|
|
||||||
|
if policyContents != "" {
|
||||||
|
log.Printf("[DEBUG] Glacier Vault: %s, put policy", vaultName)
|
||||||
|
|
||||||
|
_, err := glacierconn.SetVaultAccessPolicy(&glacier.SetVaultAccessPolicyInput{
|
||||||
|
VaultName: aws.String(d.Id()),
|
||||||
|
Policy: policy,
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error putting Glacier Vault policy: %s", err.Error())
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Printf("[DEBUG] Glacier Vault: %s, delete policy: %s", vaultName, policy)
|
||||||
|
_, err := glacierconn.DeleteVaultAccessPolicy(&glacier.DeleteVaultAccessPolicyInput{
|
||||||
|
VaultName: aws.String(d.Id()),
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error deleting Glacier Vault policy: %s", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func setGlacierVaultTags(conn *glacier.Glacier, d *schema.ResourceData) error {
|
||||||
|
if d.HasChange("tags") {
|
||||||
|
oraw, nraw := d.GetChange("tags")
|
||||||
|
o := oraw.(map[string]interface{})
|
||||||
|
n := nraw.(map[string]interface{})
|
||||||
|
create, remove := diffGlacierVaultTags(mapGlacierVaultTags(o), mapGlacierVaultTags(n))
|
||||||
|
|
||||||
|
// Set tags
|
||||||
|
if len(remove) > 0 {
|
||||||
|
tagsToRemove := &glacier.RemoveTagsFromVaultInput{
|
||||||
|
VaultName: aws.String(d.Id()),
|
||||||
|
TagKeys: glacierStringsToPointyString(remove),
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[DEBUG] Removing tags: from %s", d.Id())
|
||||||
|
_, err := conn.RemoveTagsFromVault(tagsToRemove)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(create) > 0 {
|
||||||
|
tagsToAdd := &glacier.AddTagsToVaultInput{
|
||||||
|
VaultName: aws.String(d.Id()),
|
||||||
|
Tags: glacierVaultTagsFromMap(create),
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[DEBUG] Creating tags: for %s", d.Id())
|
||||||
|
_, err := conn.AddTagsToVault(tagsToAdd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func mapGlacierVaultTags(m map[string]interface{}) map[string]string {
|
||||||
|
results := make(map[string]string)
|
||||||
|
for k, v := range m {
|
||||||
|
results[k] = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
return results
|
||||||
|
}
|
||||||
|
|
||||||
|
func diffGlacierVaultTags(oldTags, newTags map[string]string) (map[string]string, []string) {
|
||||||
|
|
||||||
|
create := make(map[string]string)
|
||||||
|
for k, v := range newTags {
|
||||||
|
create[k] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build the list of what to remove
|
||||||
|
var remove []string
|
||||||
|
for k, v := range oldTags {
|
||||||
|
old, ok := create[k]
|
||||||
|
if !ok || old != v {
|
||||||
|
// Delete it!
|
||||||
|
remove = append(remove, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return create, remove
|
||||||
|
}
|
||||||
|
|
||||||
|
func getGlacierVaultTags(glacierconn *glacier.Glacier, vaultName string) (map[string]string, error) {
|
||||||
|
request := &glacier.ListTagsForVaultInput{
|
||||||
|
VaultName: aws.String(vaultName),
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[DEBUG] Getting the tags: for %s", vaultName)
|
||||||
|
response, err := glacierconn.ListTagsForVault(request)
|
||||||
|
if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "NoSuchTagSet" {
|
||||||
|
return map[string]string{}, nil
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return glacierVaultTagsToMap(response.Tags), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func glacierVaultTagsToMap(responseTags map[string]*string) map[string]string {
|
||||||
|
results := make(map[string]string, len(responseTags))
|
||||||
|
for k, v := range responseTags {
|
||||||
|
results[k] = *v
|
||||||
|
}
|
||||||
|
|
||||||
|
return results
|
||||||
|
}
|
||||||
|
|
||||||
|
func glacierVaultTagsFromMap(responseTags map[string]string) map[string]*string {
|
||||||
|
results := make(map[string]*string, len(responseTags))
|
||||||
|
for k, v := range responseTags {
|
||||||
|
results[k] = aws.String(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
return results
|
||||||
|
}
|
||||||
|
|
||||||
|
func glacierStringsToPointyString(s []string) []*string {
|
||||||
|
results := make([]*string, len(s))
|
||||||
|
for i, x := range s {
|
||||||
|
results[i] = aws.String(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
return results
|
||||||
|
}
|
||||||
|
|
||||||
|
func glacierPointersToStringList(pointers []*string) []interface{} {
|
||||||
|
list := make([]interface{}, len(pointers))
|
||||||
|
for i, v := range pointers {
|
||||||
|
list[i] = *v
|
||||||
|
}
|
||||||
|
return list
|
||||||
|
}
|
||||||
|
|
||||||
|
func getGlacierVaultNotification(glacierconn *glacier.Glacier, vaultName string) ([]map[string]interface{}, error) {
|
||||||
|
request := &glacier.GetVaultNotificationsInput{
|
||||||
|
VaultName: aws.String(vaultName),
|
||||||
|
}
|
||||||
|
|
||||||
|
response, err := glacierconn.GetVaultNotifications(request)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Error reading Glacier Vault Notifications: %s", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
notifications := make(map[string]interface{}, 0)
|
||||||
|
|
||||||
|
log.Print("[DEBUG] Flattening Glacier Vault Notifications")
|
||||||
|
|
||||||
|
notifications["events"] = schema.NewSet(schema.HashString, glacierPointersToStringList(response.VaultNotificationConfig.Events))
|
||||||
|
notifications["sns_topic"] = *response.VaultNotificationConfig.SNSTopic
|
||||||
|
|
||||||
|
return []map[string]interface{}{notifications}, nil
|
||||||
|
}
|
|
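
normalizeJson (used as the access_policy StateFunc above) is a shared helper defined elsewhere in the provider and is not part of this hunk; a hedged sketch of the idea, which is to round-trip the string through encoding/json so that semantically equal policies compare equal in state, might look like:

// Hypothetical sketch, not necessarily the provider's exact helper:
// re-marshal the JSON so whitespace differences don't show up as
// spurious diffs between config and state.
func normalizeJson(jsonString interface{}) string {
	s, ok := jsonString.(string)
	if !ok || s == "" {
		return ""
	}
	var j interface{}
	if err := json.Unmarshal([]byte(s), &j); err != nil {
		return fmt.Sprintf("Error parsing JSON: %s", err)
	}
	b, _ := json.Marshal(j)
	return string(b)
}
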
@@ -0,0 +1,227 @@
package aws

import (
	"fmt"
	"reflect"
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/glacier"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

func TestAccAWSGlacierVault_basic(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckGlacierVaultDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccGlacierVault_basic,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckGlacierVaultExists("aws_glacier_vault.test"),
				),
			},
		},
	})
}

func TestAccAWSGlacierVault_full(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckGlacierVaultDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccGlacierVault_full,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckGlacierVaultExists("aws_glacier_vault.full"),
				),
			},
		},
	})
}

func TestAccAWSGlacierVault_RemoveNotifications(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckGlacierVaultDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccGlacierVault_full,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckGlacierVaultExists("aws_glacier_vault.full"),
				),
			},
			resource.TestStep{
				Config: testAccGlacierVault_withoutNotification,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckGlacierVaultExists("aws_glacier_vault.full"),
					testAccCheckVaultNotificationsMissing("aws_glacier_vault.full"),
				),
			},
		},
	})
}

func TestDiffGlacierVaultTags(t *testing.T) {
	cases := []struct {
		Old, New map[string]interface{}
		Create   map[string]string
		Remove   []string
	}{
		// Basic add/remove
		{
			Old: map[string]interface{}{
				"foo": "bar",
			},
			New: map[string]interface{}{
				"bar": "baz",
			},
			Create: map[string]string{
				"bar": "baz",
			},
			Remove: []string{
				"foo",
			},
		},

		// Modify
		{
			Old: map[string]interface{}{
				"foo": "bar",
			},
			New: map[string]interface{}{
				"foo": "baz",
			},
			Create: map[string]string{
				"foo": "baz",
			},
			Remove: []string{
				"foo",
			},
		},
	}

	for i, tc := range cases {
		c, r := diffGlacierVaultTags(mapGlacierVaultTags(tc.Old), mapGlacierVaultTags(tc.New))

		if !reflect.DeepEqual(c, tc.Create) {
			t.Fatalf("%d: bad create: %#v", i, c)
		}
		if !reflect.DeepEqual(r, tc.Remove) {
			t.Fatalf("%d: bad remove: %#v", i, r)
		}
	}
}

func testAccCheckGlacierVaultExists(name string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[name]
		if !ok {
			return fmt.Errorf("Not found: %s", name)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		glacierconn := testAccProvider.Meta().(*AWSClient).glacierconn
		out, err := glacierconn.DescribeVault(&glacier.DescribeVaultInput{
			VaultName: aws.String(rs.Primary.ID),
		})

		if err != nil {
			return err
		}

		if out.VaultARN == nil {
			return fmt.Errorf("No Glacier Vault Found")
		}

		if *out.VaultName != rs.Primary.ID {
			return fmt.Errorf("Glacier Vault Mismatch - existing: %q, state: %q",
				*out.VaultName, rs.Primary.ID)
		}

		return nil
	}
}

func testAccCheckVaultNotificationsMissing(name string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[name]
		if !ok {
			return fmt.Errorf("Not found: %s", name)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		glacierconn := testAccProvider.Meta().(*AWSClient).glacierconn
		out, err := glacierconn.GetVaultNotifications(&glacier.GetVaultNotificationsInput{
			VaultName: aws.String(rs.Primary.ID),
		})

		if awserr, ok := err.(awserr.Error); ok && awserr.Code() != "ResourceNotFoundException" {
			return fmt.Errorf("Expected ResourceNotFoundException for Vault %s Notification Block but got %s", rs.Primary.ID, awserr.Code())
		}

		if out.VaultNotificationConfig != nil {
			return fmt.Errorf("Vault Notification Block has been found for %s", rs.Primary.ID)
		}

		return nil
	}
}

func testAccCheckGlacierVaultDestroy(s *terraform.State) error {
	if len(s.RootModule().Resources) > 0 {
		return fmt.Errorf("Expected all resources to be gone, but found: %#v",
			s.RootModule().Resources)
	}

	return nil
}

const testAccGlacierVault_basic = `
resource "aws_glacier_vault" "test" {
  name = "my_test_vault"
}
`

const testAccGlacierVault_full = `
resource "aws_sns_topic" "aws_sns_topic" {
  name = "glacier-sns-topic"
}

resource "aws_glacier_vault" "full" {
  name = "my_test_vault"
  notification {
    sns_topic = "${aws_sns_topic.aws_sns_topic.arn}"
    events = ["ArchiveRetrievalCompleted","InventoryRetrievalCompleted"]
  }
  tags {
    Test="Test1"
  }
}
`

const testAccGlacierVault_withoutNotification = `
resource "aws_sns_topic" "aws_sns_topic" {
  name = "glacier-sns-topic"
}

resource "aws_glacier_vault" "full" {
  name = "my_test_vault"
  tags {
    Test="Test1"
  }
}
`
@@ -102,7 +102,7 @@ func testAccCheckAWSPolicyAttachmentAttributes(users []string, roles []string, g
 		}
 	}
 	if uc != 0 || rc != 0 || gc != 0 {
-		return fmt.Errorf("Error: Number of attached users, roles, or groups was incorrect:\n expected %d users and found %d\nexpected %d roles and found %d\nexpected %d groups and found %d", len(users), (len(users) - uc), len(roles), (len(roles) - rc), len(groups), (len(groups) - gc))
+		return fmt.Errorf("Error: Number of attached users, roles, or groups was incorrect:\n expected %d users and found %d\nexpected %d roles and found %d\nexpected %d groups and found %d", len(users), len(users)-uc, len(roles), len(roles)-rc, len(groups), len(groups)-gc)
 	}
 	return nil
 }
@@ -0,0 +1,101 @@
package aws

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iam"

	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsIamSamlProvider() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsIamSamlProviderCreate,
		Read:   resourceAwsIamSamlProviderRead,
		Update: resourceAwsIamSamlProviderUpdate,
		Delete: resourceAwsIamSamlProviderDelete,

		Schema: map[string]*schema.Schema{
			"arn": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"valid_until": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"saml_metadata_document": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
		},
	}
}

func resourceAwsIamSamlProviderCreate(d *schema.ResourceData, meta interface{}) error {
	iamconn := meta.(*AWSClient).iamconn

	input := &iam.CreateSAMLProviderInput{
		Name:                 aws.String(d.Get("name").(string)),
		SAMLMetadataDocument: aws.String(d.Get("saml_metadata_document").(string)),
	}

	out, err := iamconn.CreateSAMLProvider(input)
	if err != nil {
		return err
	}

	d.SetId(*out.SAMLProviderArn)

	return resourceAwsIamSamlProviderRead(d, meta)
}

func resourceAwsIamSamlProviderRead(d *schema.ResourceData, meta interface{}) error {
	iamconn := meta.(*AWSClient).iamconn

	input := &iam.GetSAMLProviderInput{
		SAMLProviderArn: aws.String(d.Id()),
	}
	out, err := iamconn.GetSAMLProvider(input)
	if err != nil {
		return err
	}

	validUntil := out.ValidUntil.Format(time.RFC1123)
	d.Set("valid_until", validUntil)
	d.Set("saml_metadata_document", *out.SAMLMetadataDocument)

	return nil
}

func resourceAwsIamSamlProviderUpdate(d *schema.ResourceData, meta interface{}) error {
	iamconn := meta.(*AWSClient).iamconn

	input := &iam.UpdateSAMLProviderInput{
		SAMLProviderArn:      aws.String(d.Id()),
		SAMLMetadataDocument: aws.String(d.Get("saml_metadata_document").(string)),
	}
	_, err := iamconn.UpdateSAMLProvider(input)
	if err != nil {
		return err
	}

	return resourceAwsIamSamlProviderRead(d, meta)
}

func resourceAwsIamSamlProviderDelete(d *schema.ResourceData, meta interface{}) error {
	iamconn := meta.(*AWSClient).iamconn

	input := &iam.DeleteSAMLProviderInput{
		SAMLProviderArn: aws.String(d.Id()),
	}
	_, err := iamconn.DeleteSAMLProvider(input)

	return err
}
@@ -0,0 +1,79 @@
package aws

import (
	"fmt"
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iam"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

func TestAccAWSIAMSamlProvider_basic(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckIAMSamlProviderDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccIAMSamlProviderConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckIAMSamlProvider("aws_iam_saml_provider.salesforce"),
				),
			},
			resource.TestStep{
				Config: testAccIAMSamlProviderConfigUpdate,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckIAMSamlProvider("aws_iam_saml_provider.salesforce"),
				),
			},
		},
	})
}

func testAccCheckIAMSamlProviderDestroy(s *terraform.State) error {
	if len(s.RootModule().Resources) > 0 {
		return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
	}

	return nil
}

func testAccCheckIAMSamlProvider(id string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[id]
		if !ok {
			return fmt.Errorf("Not Found: %s", id)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		iamconn := testAccProvider.Meta().(*AWSClient).iamconn
		_, err := iamconn.GetSAMLProvider(&iam.GetSAMLProviderInput{
			SAMLProviderArn: aws.String(rs.Primary.ID),
		})

		if err != nil {
			return err
		}

		return nil
	}
}

const testAccIAMSamlProviderConfig = `
resource "aws_iam_saml_provider" "salesforce" {
  name = "tf-salesforce-test"
  saml_metadata_document = "${file("./test-fixtures/saml-metadata.xml")}"
}
`

const testAccIAMSamlProviderConfigUpdate = `
resource "aws_iam_saml_provider" "salesforce" {
  name = "tf-salesforce-test"
  saml_metadata_document = "${file("./test-fixtures/saml-metadata-modified.xml")}"
}
`
@@ -414,11 +414,6 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error {
 		})
 	}
 
-	// Set our attributes
-	if err := resourceAwsInstanceRead(d, meta); err != nil {
-		return err
-	}
-
 	// Update if we need to
 	return resourceAwsInstanceUpdate(d, meta)
 }

@@ -548,7 +543,8 @@ func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
 	}
 
 	// SourceDestCheck can only be set on VPC instances
-	if d.Get("subnet_id").(string) != "" {
+	// AWS will return an error of InvalidParameterCombination if we attempt
+	// to modify the source_dest_check of an instance in EC2 Classic
 	log.Printf("[INFO] Modifying instance %s", d.Id())
 	_, err := conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
 		InstanceId: aws.String(d.Id()),

@@ -557,8 +553,14 @@ func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
 		},
 	})
 	if err != nil {
+		if ec2err, ok := err.(awserr.Error); ok {
+			// Tolerate InvalidParameterCombination error in Classic, otherwise
+			// return the error
+			if "InvalidParameterCombination" != ec2err.Code() {
 				return err
 			}
+			log.Printf("[WARN] Attempted to modify SourceDestCheck on non VPC instance: %s", ec2err.Message())
+		}
 	}
 
 	if d.HasChange("vpc_security_group_ids") {

@@ -693,7 +695,7 @@ func readBlockDevicesFromInstance(instance *ec2.Instance, conn *ec2.EC2) (map[st
 	instanceBlockDevices := make(map[string]*ec2.InstanceBlockDeviceMapping)
 	for _, bd := range instance.BlockDeviceMappings {
 		if bd.Ebs != nil {
-			instanceBlockDevices[*(bd.Ebs.VolumeId)] = bd
+			instanceBlockDevices[*bd.Ebs.VolumeId] = bd
 		}
 	}
 

@@ -753,9 +755,9 @@ func readBlockDevicesFromInstance(instance *ec2.Instance, conn *ec2.EC2) (map[st
 }
 
 func blockDeviceIsRoot(bd *ec2.InstanceBlockDeviceMapping, instance *ec2.Instance) bool {
-	return (bd.DeviceName != nil &&
+	return bd.DeviceName != nil &&
 		instance.RootDeviceName != nil &&
-		*bd.DeviceName == *instance.RootDeviceName)
+		*bd.DeviceName == *instance.RootDeviceName
 }
 
 func fetchRootDeviceName(ami string, conn *ec2.EC2) (*string, error) {

@@ -190,6 +190,9 @@ func TestAccAWSInstance_sourceDestCheck(t *testing.T) {
 
 	testCheck := func(enabled bool) resource.TestCheckFunc {
 		return func(*terraform.State) error {
+			if v.SourceDestCheck == nil {
+				return fmt.Errorf("bad source_dest_check: got nil")
+			}
 			if *v.SourceDestCheck != enabled {
 				return fmt.Errorf("bad source_dest_check: %#v", *v.SourceDestCheck)
 			}
@@ -2,6 +2,7 @@ package aws
 
 import (
 	"fmt"
+	"strings"
 
 	"github.com/hashicorp/terraform/helper/resource"
 	"github.com/hashicorp/terraform/helper/schema"

@@ -18,6 +19,9 @@ func resourceAwsKeyPair() *schema.Resource {
 		Update: nil,
 		Delete: resourceAwsKeyPairDelete,
 
+		SchemaVersion: 1,
+		MigrateState:  resourceAwsKeyPairMigrateState,
+
 		Schema: map[string]*schema.Schema{
 			"key_name": &schema.Schema{
 				Type: schema.TypeString,

@@ -29,6 +33,14 @@ func resourceAwsKeyPair() *schema.Resource {
 				Type:     schema.TypeString,
 				Required: true,
 				ForceNew: true,
+				StateFunc: func(v interface{}) string {
+					switch v.(type) {
+					case string:
+						return strings.TrimSpace(v.(string))
+					default:
+						return ""
+					}
+				},
 			},
 			"fingerprint": &schema.Schema{
 				Type: schema.TypeString,

@@ -45,6 +57,7 @@ func resourceAwsKeyPairCreate(d *schema.ResourceData, meta interface{}) error {
 	if keyName == "" {
 		keyName = resource.UniqueId()
 	}
+
 	publicKey := d.Get("public_key").(string)
 	req := &ec2.ImportKeyPairInput{
 		KeyName: aws.String(keyName),
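
For context on the new StateFunc: a public key loaded with file() usually carries a trailing newline, so without trimming, the key stored in state would never match the configured one and every plan would show a diff. A small standalone illustration (the key text here is made up):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// What file("id_rsa.pub") typically yields: the key plus "\n".
	fromFile := "ssh-rsa AAAAB3Nza... user@host\n"
	// The StateFunc stores the trimmed form, so plans stay clean.
	fmt.Printf("%q\n", strings.TrimSpace(fromFile)) // "ssh-rsa AAAAB3Nza... user@host"
}
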
@@ -0,0 +1,38 @@
package aws

import (
	"fmt"
	"log"
	"strings"

	"github.com/hashicorp/terraform/terraform"
)

func resourceAwsKeyPairMigrateState(
	v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
	switch v {
	case 0:
		log.Println("[INFO] Found AWS Key Pair State v0; migrating to v1")
		return migrateKeyPairStateV0toV1(is)
	default:
		return is, fmt.Errorf("Unexpected schema version: %d", v)
	}
}

func migrateKeyPairStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
	if is.Empty() {
		log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
		return is, nil
	}

	log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes)

	// replace public_key with a stripped version, removing `\n` from the end
	// see https://github.com/hashicorp/terraform/issues/3455
	is.Attributes["public_key"] = strings.TrimSpace(is.Attributes["public_key"])

	log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
	return is, nil
}
@@ -0,0 +1,55 @@
package aws

import (
	"testing"

	"github.com/hashicorp/terraform/terraform"
)

func TestAWSKeyPairMigrateState(t *testing.T) {
	cases := map[string]struct {
		StateVersion int
		ID           string
		Attributes   map[string]string
		Expected     string
		Meta         interface{}
	}{
		"v0_1": {
			StateVersion: 0,
			ID:           "tf-testing-file",
			Attributes: map[string]string{
				"fingerprint": "1d:cd:46:31:a9:4a:e0:06:8a:a1:22:cb:3b:bf:8e:42",
				"key_name":    "tf-testing-file",
				"public_key":  "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4LBtwcFsQAYWw1cnOwRTZCJCzPSzq0dl3== ctshryock",
			},
			Expected: "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4LBtwcFsQAYWw1cnOwRTZCJCzPSzq0dl3== ctshryock",
		},
		"v0_2": {
			StateVersion: 0,
			ID:           "tf-testing-file",
			Attributes: map[string]string{
				"fingerprint": "1d:cd:46:31:a9:4a:e0:06:8a:a1:22:cb:3b:bf:8e:42",
				"key_name":    "tf-testing-file",
				"public_key":  "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4LBtwcFsQAYWw1cnOwRTZCJCzPSzq0dl3== ctshryock\n",
			},
			Expected: "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4LBtwcFsQAYWw1cnOwRTZCJCzPSzq0dl3== ctshryock",
		},
	}

	for tn, tc := range cases {
		is := &terraform.InstanceState{
			ID:         tc.ID,
			Attributes: tc.Attributes,
		}
		is, err := resourceAwsKeyPairMigrateState(
			tc.StateVersion, is, tc.Meta)

		if err != nil {
			t.Fatalf("bad: %s, err: %#v", tn, err)
		}

		if is.Attributes["public_key"] != tc.Expected {
			t.Fatalf("Bad public_key migration: %s\n\n expected: %s", is.Attributes["public_key"], tc.Expected)
		}
	}
}
@@ -2,6 +2,7 @@ package aws
 
 import (
 	"fmt"
+	"log"
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws"

@@ -15,6 +16,7 @@ func resourceAwsKinesisStream() *schema.Resource {
 	return &schema.Resource{
 		Create: resourceAwsKinesisStreamCreate,
 		Read:   resourceAwsKinesisStreamRead,
+		Update: resourceAwsKinesisStreamUpdate,
 		Delete: resourceAwsKinesisStreamDelete,
 
 		Schema: map[string]*schema.Schema{

@@ -35,6 +37,7 @@ func resourceAwsKinesisStream() *schema.Resource {
 				Optional: true,
 				Computed: true,
 			},
+			"tags": tagsSchema(),
 		},
 	}
 }

@@ -75,13 +78,28 @@ func resourceAwsKinesisStreamCreate(d *schema.ResourceData, meta interface{}) er
 	d.SetId(*s.StreamARN)
 	d.Set("arn", s.StreamARN)
 
-	return nil
+	return resourceAwsKinesisStreamUpdate(d, meta)
+}
+
+func resourceAwsKinesisStreamUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).kinesisconn
+
+	d.Partial(true)
+	if err := setTagsKinesis(conn, d); err != nil {
+		return err
+	}
+
+	d.SetPartial("tags")
+	d.Partial(false)
+
+	return resourceAwsKinesisStreamRead(d, meta)
 }
 
 func resourceAwsKinesisStreamRead(d *schema.ResourceData, meta interface{}) error {
 	conn := meta.(*AWSClient).kinesisconn
+	sn := d.Get("name").(string)
 	describeOpts := &kinesis.DescribeStreamInput{
-		StreamName: aws.String(d.Get("name").(string)),
+		StreamName: aws.String(sn),
 	}
 	resp, err := conn.DescribeStream(describeOpts)
 	if err != nil {

@@ -99,6 +117,17 @@ func resourceAwsKinesisStreamRead(d *schema.ResourceData, meta interface{}) erro
 	d.Set("arn", *s.StreamARN)
 	d.Set("shard_count", len(s.Shards))
 
+	// set tags
+	describeTagsOpts := &kinesis.ListTagsForStreamInput{
+		StreamName: aws.String(sn),
+	}
+	tagsResp, err := conn.ListTagsForStream(describeTagsOpts)
+	if err != nil {
+		log.Printf("[DEBUG] Error retrieving tags for Stream: %s. %s", sn, err)
+	} else {
+		d.Set("tags", tagsToMapKinesis(tagsResp.Tags))
+	}
+
 	return nil
 }
 

@@ -107,5 +107,8 @@ var testAccKinesisStreamConfig = fmt.Sprintf(`
 resource "aws_kinesis_stream" "test_stream" {
 	name = "terraform-kinesis-test-%d"
 	shard_count = 2
+	tags {
+		Name = "tf-test"
+	}
 }
 `, rand.New(rand.NewSource(time.Now().UnixNano())).Int())
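
setTagsKinesis and tagsToMapKinesis come from a tags helper added alongside this change and are not shown in this hunk; a hedged sketch of the read-side conversion (a hypothetical reconstruction; the field names follow the aws-sdk-go kinesis.Tag struct) could be:

// Hypothetical sketch of the conversion used by d.Set("tags", ...) above:
// flatten the API's []*kinesis.Tag into the plain map stored in state.
func tagsToMapKinesis(ts []*kinesis.Tag) map[string]string {
	result := make(map[string]string)
	for _, t := range ts {
		result[*t.Key] = *t.Value
	}
	return result
}
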
@@ -15,8 +15,6 @@ func resourceAwsLBCookieStickinessPolicy() *schema.Resource {
 		// There is no concept of "updating" an LB Stickiness policy in
 		// the AWS API.
 		Create: resourceAwsLBCookieStickinessPolicyCreate,
-		Update: resourceAwsLBCookieStickinessPolicyCreate,
-
 		Read:   resourceAwsLBCookieStickinessPolicyRead,
 		Delete: resourceAwsLBCookieStickinessPolicyDelete,
 
@@ -0,0 +1,17 @@
package aws

import (
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsOpsworksCustomLayer() *schema.Resource {
	layerType := &opsworksLayerType{
		TypeName:        "custom",
		CustomShortName: true,

		// The "custom" layer type has no additional attributes
		Attributes: map[string]*opsworksLayerTypeAttribute{},
	}

	return layerType.SchemaResource()
}
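
opsworksLayerType and its SchemaResource method live in a shared layer helper in this changeset and are not part of this hunk; a hedged sketch of the shapes, with fields inferred only from the literals used in these layer files, could be:

// Inferred sketch, not the real definitions from the shared helper.
type opsworksLayerTypeAttribute struct {
	AttrName  string           // OpsWorks attribute key, e.g. "GangliaUrl"
	Type      schema.ValueType // schema.TypeString, TypeBool, TypeInt, ...
	Default   interface{}      // default surfaced in the generated schema
	Required  bool
	WriteOnly bool // write-only values (passwords) are never read back
}

type opsworksLayerType struct {
	TypeName         string // OpsWorks layer type, e.g. "custom", "lb"
	DefaultLayerName string
	CustomShortName  bool
	Attributes       map[string]*opsworksLayerTypeAttribute
}
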
@@ -0,0 +1,234 @@
package aws

import (
	"fmt"
	"testing"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

// These tests assume the existence of predefined Opsworks IAM roles named `aws-opsworks-ec2-role`
// and `aws-opsworks-service-role`.

func TestAccAwsOpsworksCustomLayer(t *testing.T) {
	opsiam := testAccAwsOpsworksStackIam{}
	testAccAwsOpsworksStackPopulateIam(t, &opsiam)

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAwsOpsworksCustomLayerDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: fmt.Sprintf(testAccAwsOpsworksCustomLayerConfigCreate, opsiam.ServiceRoleArn, opsiam.InstanceProfileArn),
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "name", "tf-ops-acc-custom-layer",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "auto_assign_elastic_ips", "false",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "auto_healing", "true",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "drain_elb_on_shutdown", "true",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "instance_shutdown_timeout", "300",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "custom_security_group_ids.#", "2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.#", "2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.1368285564", "git",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.2937857443", "golang",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.#", "1",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.type", "gp2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.number_of_disks", "2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.mount_point", "/home",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.size", "100",
					),
				),
			},
			resource.TestStep{
				Config: fmt.Sprintf(testAccAwsOpsworksCustomLayerConfigUpdate, opsiam.ServiceRoleArn, opsiam.InstanceProfileArn),
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "name", "tf-ops-acc-custom-layer",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "drain_elb_on_shutdown", "false",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "instance_shutdown_timeout", "120",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "custom_security_group_ids.#", "3",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.#", "3",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.1368285564", "git",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.2937857443", "golang",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.4101929740", "subversion",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.#", "2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.type", "gp2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.number_of_disks", "2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.mount_point", "/home",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.size", "100",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.type", "io1",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.number_of_disks", "4",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.mount_point", "/var",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.size", "100",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.raid_level", "1",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.iops", "3000",
					),
				),
			},
		},
	})
}

func testAccCheckAwsOpsworksCustomLayerDestroy(s *terraform.State) error {
	if len(s.RootModule().Resources) > 0 {
		return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
	}

	return nil
}

var testAccAwsOpsworksCustomLayerSecurityGroups = `
resource "aws_security_group" "tf-ops-acc-layer1" {
  name = "tf-ops-acc-layer1"
  ingress {
    from_port = 8
    to_port = -1
    protocol = "icmp"
    cidr_blocks = ["0.0.0.0/0"]
  }
}
resource "aws_security_group" "tf-ops-acc-layer2" {
  name = "tf-ops-acc-layer2"
  ingress {
    from_port = 8
    to_port = -1
    protocol = "icmp"
    cidr_blocks = ["0.0.0.0/0"]
  }
}
`

var testAccAwsOpsworksCustomLayerConfigCreate = testAccAwsOpsworksStackConfigNoVpcCreate + testAccAwsOpsworksCustomLayerSecurityGroups + `
resource "aws_opsworks_custom_layer" "tf-acc" {
  stack_id = "${aws_opsworks_stack.tf-acc.id}"
  name = "tf-ops-acc-custom-layer"
  short_name = "tf-ops-acc-custom-layer"
  auto_assign_public_ips = true
  custom_security_group_ids = [
    "${aws_security_group.tf-ops-acc-layer1.id}",
    "${aws_security_group.tf-ops-acc-layer2.id}",
  ]
  drain_elb_on_shutdown = true
  instance_shutdown_timeout = 300
  system_packages = [
    "git",
    "golang",
  ]
  ebs_volume {
    type = "gp2"
    number_of_disks = 2
    mount_point = "/home"
    size = 100
    raid_level = 0
  }
}
`

var testAccAwsOpsworksCustomLayerConfigUpdate = testAccAwsOpsworksStackConfigNoVpcCreate + testAccAwsOpsworksCustomLayerSecurityGroups + `
resource "aws_security_group" "tf-ops-acc-layer3" {
  name = "tf-ops-acc-layer3"
  ingress {
    from_port = 8
    to_port = -1
    protocol = "icmp"
    cidr_blocks = ["0.0.0.0/0"]
  }
}
resource "aws_opsworks_custom_layer" "tf-acc" {
  stack_id = "${aws_opsworks_stack.tf-acc.id}"
  name = "tf-ops-acc-custom-layer"
  short_name = "tf-ops-acc-custom-layer"
  auto_assign_public_ips = true
  custom_security_group_ids = [
    "${aws_security_group.tf-ops-acc-layer1.id}",
    "${aws_security_group.tf-ops-acc-layer2.id}",
    "${aws_security_group.tf-ops-acc-layer3.id}",
  ]
  drain_elb_on_shutdown = false
  instance_shutdown_timeout = 120
  system_packages = [
    "git",
    "golang",
    "subversion",
  ]
  ebs_volume {
    type = "gp2"
    number_of_disks = 2
    mount_point = "/home"
    size = 100
    raid_level = 0
  }
  ebs_volume {
    type = "io1"
    number_of_disks = 4
    mount_point = "/var"
    size = 100
    raid_level = 1
    iops = 3000
  }
}
`
@@ -0,0 +1,33 @@
package aws

import (
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsOpsworksGangliaLayer() *schema.Resource {
	layerType := &opsworksLayerType{
		TypeName:         "monitoring-master",
		DefaultLayerName: "Ganglia",

		Attributes: map[string]*opsworksLayerTypeAttribute{
			"url": &opsworksLayerTypeAttribute{
				AttrName: "GangliaUrl",
				Type:     schema.TypeString,
				Default:  "/ganglia",
			},
			"username": &opsworksLayerTypeAttribute{
				AttrName: "GangliaUser",
				Type:     schema.TypeString,
				Default:  "opsworks",
			},
			"password": &opsworksLayerTypeAttribute{
				AttrName:  "GangliaPassword",
				Type:      schema.TypeString,
				Required:  true,
				WriteOnly: true,
			},
		},
	}

	return layerType.SchemaResource()
}
@@ -0,0 +1,48 @@
package aws

import (
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsOpsworksHaproxyLayer() *schema.Resource {
	layerType := &opsworksLayerType{
		TypeName:         "lb",
		DefaultLayerName: "HAProxy",

		Attributes: map[string]*opsworksLayerTypeAttribute{
			"stats_enabled": &opsworksLayerTypeAttribute{
				AttrName: "EnableHaproxyStats",
				Type:     schema.TypeBool,
				Default:  true,
			},
			"stats_url": &opsworksLayerTypeAttribute{
				AttrName: "HaproxyStatsUrl",
				Type:     schema.TypeString,
				Default:  "/haproxy?stats",
			},
			"stats_user": &opsworksLayerTypeAttribute{
				AttrName: "HaproxyStatsUser",
				Type:     schema.TypeString,
				Default:  "opsworks",
			},
			"stats_password": &opsworksLayerTypeAttribute{
				AttrName:  "HaproxyStatsPassword",
				Type:      schema.TypeString,
				WriteOnly: true,
				Required:  true,
			},
			"healthcheck_url": &opsworksLayerTypeAttribute{
				AttrName: "HaproxyHealthCheckUrl",
				Type:     schema.TypeString,
				Default:  "/",
			},
			"healthcheck_method": &opsworksLayerTypeAttribute{
				AttrName: "HaproxyHealthCheckMethod",
				Type:     schema.TypeString,
				Default:  "OPTIONS",
			},
		},
	}

	return layerType.SchemaResource()
}
@@ -0,0 +1,42 @@
package aws

import (
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsOpsworksJavaAppLayer() *schema.Resource {
	layerType := &opsworksLayerType{
		TypeName:         "java-app",
		DefaultLayerName: "Java App Server",

		Attributes: map[string]*opsworksLayerTypeAttribute{
			"jvm_type": &opsworksLayerTypeAttribute{
				AttrName: "Jvm",
				Type:     schema.TypeString,
				Default:  "openjdk",
			},
			"jvm_version": &opsworksLayerTypeAttribute{
				AttrName: "JvmVersion",
				Type:     schema.TypeString,
				Default:  "7",
			},
			"jvm_options": &opsworksLayerTypeAttribute{
				AttrName: "JvmOptions",
				Type:     schema.TypeString,
				Default:  "",
			},
			"app_server": &opsworksLayerTypeAttribute{
				AttrName: "JavaAppServer",
				Type:     schema.TypeString,
				Default:  "tomcat",
			},
			"app_server_version": &opsworksLayerTypeAttribute{
				AttrName: "JavaAppServerVersion",
				Type:     schema.TypeString,
				Default:  "7",
			},
		},
	}

	return layerType.SchemaResource()
}
@@ -0,0 +1,22 @@
package aws

import (
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsOpsworksMemcachedLayer() *schema.Resource {
	layerType := &opsworksLayerType{
		TypeName:         "memcached",
		DefaultLayerName: "Memcached",

		Attributes: map[string]*opsworksLayerTypeAttribute{
			"allocated_memory": &opsworksLayerTypeAttribute{
				AttrName: "MemcachedMemory",
				Type:     schema.TypeInt,
				Default:  512,
			},
		},
	}

	return layerType.SchemaResource()
}
@@ -0,0 +1,27 @@
package aws

import (
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsOpsworksMysqlLayer() *schema.Resource {
	layerType := &opsworksLayerType{
		TypeName:         "db-master",
		DefaultLayerName: "MySQL",

		Attributes: map[string]*opsworksLayerTypeAttribute{
			"root_password": &opsworksLayerTypeAttribute{
				AttrName:  "MysqlRootPassword",
				Type:      schema.TypeString,
				WriteOnly: true,
			},
			"root_password_on_all_instances": &opsworksLayerTypeAttribute{
				AttrName: "MysqlRootPasswordUbiquitous",
				Type:     schema.TypeBool,
				Default:  true,
			},
		},
	}

	return layerType.SchemaResource()
}
@@ -0,0 +1,22 @@
package aws

import (
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsOpsworksNodejsAppLayer() *schema.Resource {
	layerType := &opsworksLayerType{
		TypeName:         "nodejs-app",
		DefaultLayerName: "Node.js App Server",

		Attributes: map[string]*opsworksLayerTypeAttribute{
			"nodejs_version": &opsworksLayerTypeAttribute{
				AttrName: "NodejsVersion",
				Type:     schema.TypeString,
				Default:  "0.10.38",
			},
		},
	}

	return layerType.SchemaResource()
}
@@ -0,0 +1,16 @@
package aws

import (
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsOpsworksPhpAppLayer() *schema.Resource {
	layerType := &opsworksLayerType{
		TypeName:         "php-app",
		DefaultLayerName: "PHP App Server",

		Attributes: map[string]*opsworksLayerTypeAttribute{},
	}

	return layerType.SchemaResource()
}
@ -0,0 +1,47 @@
|
||||||
|
package aws
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
func resourceAwsOpsworksRailsAppLayer() *schema.Resource {
|
||||||
|
layerType := &opsworksLayerType{
|
||||||
|
TypeName: "rails-app",
|
||||||
|
DefaultLayerName: "Rails App Server",
|
||||||
|
|
||||||
|
Attributes: map[string]*opsworksLayerTypeAttribute{
|
||||||
|
"ruby_version": &opsworksLayerTypeAttribute{
|
||||||
|
AttrName: "RubyVersion",
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Default: "2.0.0",
|
||||||
|
},
|
||||||
|
"app_server": &opsworksLayerTypeAttribute{
|
||||||
|
AttrName: "RailsStack",
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Default: "apache_passenger",
|
||||||
|
},
|
||||||
|
"passenger_version": &opsworksLayerTypeAttribute{
|
||||||
|
AttrName: "PassengerVersion",
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Default: "4.0.46",
|
||||||
|
},
|
||||||
|
"rubygems_version": &opsworksLayerTypeAttribute{
|
||||||
|
AttrName: "RubygemsVersion",
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Default: "2.2.2",
|
||||||
|
},
|
||||||
|
"manage_bundler": &opsworksLayerTypeAttribute{
|
||||||
|
AttrName: "ManageBundler",
|
||||||
|
Type: schema.TypeBool,
|
||||||
|
Default: true,
|
||||||
|
},
|
||||||
|
"bundler_version": &opsworksLayerTypeAttribute{
|
||||||
|
AttrName: "BundlerVersion",
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Default: "1.5.3",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
return layerType.SchemaResource()
|
||||||
|
}

@@ -0,0 +1,456 @@
package aws

import (
	"fmt"
	"log"
	"strings"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func resourceAwsOpsworksStack() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsOpsworksStackCreate,
		Read:   resourceAwsOpsworksStackRead,
		Update: resourceAwsOpsworksStackUpdate,
		Delete: resourceAwsOpsworksStackDelete,

		Schema: map[string]*schema.Schema{
			"id": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},

			"region": &schema.Schema{
				Type:     schema.TypeString,
				ForceNew: true,
				Required: true,
			},

			"service_role_arn": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},

			"default_instance_profile_arn": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},

			"color": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"configuration_manager_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Default:  "Chef",
			},

			"configuration_manager_version": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Default:  "11.4",
			},

			"manage_berkshelf": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
			},

			"berkshelf_version": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Default:  "3.2.0",
			},

			"custom_cookbooks_source": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Computed: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"type": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},

						"url": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},

						"username": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},

						"password": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},

						"revision": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},

						"ssh_key": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},
					},
				},
			},

			"custom_json": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"default_availability_zone": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"default_os": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Default:  "Ubuntu 12.04 LTS",
			},

			"default_root_device_type": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Default:  "instance-store",
			},

			"default_ssh_key_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"default_subnet_id": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"hostname_theme": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Default:  "Layer_Dependent",
			},

			"use_custom_cookbooks": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
			},

			"use_opsworks_security_groups": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Default:  true,
			},

			"vpc_id": &schema.Schema{
				Type:     schema.TypeString,
				ForceNew: true,
				Optional: true,
			},
		},
	}
}

func resourceAwsOpsworksStackValidate(d *schema.ResourceData) error {
	cookbooksSourceCount := d.Get("custom_cookbooks_source.#").(int)
	if cookbooksSourceCount > 1 {
		return fmt.Errorf("Only one custom_cookbooks_source is permitted")
	}

	vpcId := d.Get("vpc_id").(string)
	if vpcId != "" {
		if d.Get("default_subnet_id").(string) == "" {
			return fmt.Errorf("default_subnet_id must be set if vpc_id is set")
		}
	} else {
		if d.Get("default_availability_zone").(string) == "" {
			return fmt.Errorf("either vpc_id or default_availability_zone must be set")
		}
	}

	return nil
}

func resourceAwsOpsworksStackCustomCookbooksSource(d *schema.ResourceData) *opsworks.Source {
	count := d.Get("custom_cookbooks_source.#").(int)
	if count == 0 {
		return nil
	}

	return &opsworks.Source{
		Type:     aws.String(d.Get("custom_cookbooks_source.0.type").(string)),
		Url:      aws.String(d.Get("custom_cookbooks_source.0.url").(string)),
		Username: aws.String(d.Get("custom_cookbooks_source.0.username").(string)),
		Password: aws.String(d.Get("custom_cookbooks_source.0.password").(string)),
		Revision: aws.String(d.Get("custom_cookbooks_source.0.revision").(string)),
		SshKey:   aws.String(d.Get("custom_cookbooks_source.0.ssh_key").(string)),
	}
}

func resourceAwsOpsworksSetStackCustomCookbooksSource(d *schema.ResourceData, v *opsworks.Source) {
	nv := make([]interface{}, 0, 1)
	if v != nil {
		m := make(map[string]interface{})
		if v.Type != nil {
			m["type"] = *v.Type
		}
		if v.Url != nil {
			m["url"] = *v.Url
		}
		if v.Username != nil {
			m["username"] = *v.Username
		}
		if v.Password != nil {
			m["password"] = *v.Password
		}
		if v.Revision != nil {
			m["revision"] = *v.Revision
		}
		if v.SshKey != nil {
			m["ssh_key"] = *v.SshKey
		}
		nv = append(nv, m)
	}

	err := d.Set("custom_cookbooks_source", nv)
	if err != nil {
		// should never happen
		panic(err)
	}
}

func resourceAwsOpsworksStackRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*AWSClient).opsworksconn

	req := &opsworks.DescribeStacksInput{
		StackIds: []*string{
			aws.String(d.Id()),
		},
	}

	log.Printf("[DEBUG] Reading OpsWorks stack: %s", d.Id())

	resp, err := client.DescribeStacks(req)
	if err != nil {
		if awserr, ok := err.(awserr.Error); ok {
			if awserr.Code() == "ResourceNotFoundException" {
				d.SetId("")
				return nil
			}
		}
		return err
	}

	stack := resp.Stacks[0]
	d.Set("name", stack.Name)
	d.Set("region", stack.Region)
	d.Set("default_instance_profile_arn", stack.DefaultInstanceProfileArn)
	d.Set("service_role_arn", stack.ServiceRoleArn)
	d.Set("default_availability_zone", stack.DefaultAvailabilityZone)
	d.Set("default_os", stack.DefaultOs)
	d.Set("default_root_device_type", stack.DefaultRootDeviceType)
	d.Set("default_ssh_key_name", stack.DefaultSshKeyName)
	d.Set("default_subnet_id", stack.DefaultSubnetId)
	d.Set("hostname_theme", stack.HostnameTheme)
	d.Set("use_custom_cookbooks", stack.UseCustomCookbooks)
	d.Set("use_opsworks_security_groups", stack.UseOpsworksSecurityGroups)
	d.Set("vpc_id", stack.VpcId)
	if color, ok := stack.Attributes["Color"]; ok {
		d.Set("color", color)
	}
	if stack.ConfigurationManager != nil {
		d.Set("configuration_manager_name", stack.ConfigurationManager.Name)
		d.Set("configuration_manager_version", stack.ConfigurationManager.Version)
	}
	if stack.ChefConfiguration != nil {
		d.Set("berkshelf_version", stack.ChefConfiguration.BerkshelfVersion)
		d.Set("manage_berkshelf", stack.ChefConfiguration.ManageBerkshelf)
	}
	resourceAwsOpsworksSetStackCustomCookbooksSource(d, stack.CustomCookbooksSource)

	return nil
}

func resourceAwsOpsworksStackCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*AWSClient).opsworksconn

	err := resourceAwsOpsworksStackValidate(d)
	if err != nil {
		return err
	}

	req := &opsworks.CreateStackInput{
		DefaultInstanceProfileArn: aws.String(d.Get("default_instance_profile_arn").(string)),
		Name:             aws.String(d.Get("name").(string)),
		Region:           aws.String(d.Get("region").(string)),
		ServiceRoleArn:   aws.String(d.Get("service_role_arn").(string)),
	}
	inVpc := false
	if vpcId, ok := d.GetOk("vpc_id"); ok {
		req.VpcId = aws.String(vpcId.(string))
		inVpc = true
	}
	if defaultSubnetId, ok := d.GetOk("default_subnet_id"); ok {
		req.DefaultSubnetId = aws.String(defaultSubnetId.(string))
	}
	if defaultAvailabilityZone, ok := d.GetOk("default_availability_zone"); ok {
		req.DefaultAvailabilityZone = aws.String(defaultAvailabilityZone.(string))
	}

	log.Printf("[DEBUG] Creating OpsWorks stack: %s", *req.Name)

	var resp *opsworks.CreateStackOutput
	err = resource.Retry(20*time.Minute, func() error {
		var cerr error
		resp, cerr = client.CreateStack(req)
		if cerr != nil {
			if opserr, ok := cerr.(awserr.Error); ok {
				// If Terraform is also managing the service IAM role, it may
				// have just been created and not yet be propagated. AWS doesn't
				// provide a machine-readable code for this specific error, so
				// we're forced to do fragile message matching.
				// The full error we're looking for looks something like:
				// Service Role Arn: [...] is not yet propagated, please try again in a couple of minutes
				if opserr.Code() == "ValidationException" && strings.Contains(opserr.Message(), "not yet propagated") {
					log.Printf("[INFO] Waiting for service IAM role to propagate")
					return cerr
				}
			}
			return resource.RetryError{Err: cerr}
		}
		return nil
	})
	if err != nil {
		return err
	}

	stackId := *resp.StackId
	d.SetId(stackId)
	d.Set("id", stackId)

	if inVpc {
		// For VPC-based stacks, OpsWorks asynchronously creates some default
		// security groups which must exist before layers can be created.
		// Unfortunately it doesn't tell us what the ids of these are, so
		// we can't actually check for them. Instead, we just wait a nominal
		// amount of time for their creation to complete.
		log.Print("[INFO] Waiting for OpsWorks built-in security groups to be created")
		time.Sleep(30 * time.Second)
	}

	return resourceAwsOpsworksStackUpdate(d, meta)
}

func resourceAwsOpsworksStackUpdate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*AWSClient).opsworksconn

	err := resourceAwsOpsworksStackValidate(d)
	if err != nil {
		return err
	}

	req := &opsworks.UpdateStackInput{
		CustomJson:                aws.String(d.Get("custom_json").(string)),
		DefaultInstanceProfileArn: aws.String(d.Get("default_instance_profile_arn").(string)),
		DefaultRootDeviceType:     aws.String(d.Get("default_root_device_type").(string)),
		DefaultSshKeyName:         aws.String(d.Get("default_ssh_key_name").(string)),
		Name:                      aws.String(d.Get("name").(string)),
		ServiceRoleArn:            aws.String(d.Get("service_role_arn").(string)),
		StackId:                   aws.String(d.Id()),
		UseCustomCookbooks:        aws.Bool(d.Get("use_custom_cookbooks").(bool)),
		UseOpsworksSecurityGroups: aws.Bool(d.Get("use_opsworks_security_groups").(bool)),
		Attributes:                make(map[string]*string),
		CustomCookbooksSource:     resourceAwsOpsworksStackCustomCookbooksSource(d),
	}
	if v, ok := d.GetOk("default_os"); ok {
		req.DefaultOs = aws.String(v.(string))
	}
	if v, ok := d.GetOk("default_subnet_id"); ok {
		req.DefaultSubnetId = aws.String(v.(string))
	}
	if v, ok := d.GetOk("default_availability_zone"); ok {
		req.DefaultAvailabilityZone = aws.String(v.(string))
	}
	if v, ok := d.GetOk("hostname_theme"); ok {
		req.HostnameTheme = aws.String(v.(string))
	}
	if v, ok := d.GetOk("color"); ok {
		req.Attributes["Color"] = aws.String(v.(string))
	}
	req.ChefConfiguration = &opsworks.ChefConfiguration{
		BerkshelfVersion: aws.String(d.Get("berkshelf_version").(string)),
		ManageBerkshelf:  aws.Bool(d.Get("manage_berkshelf").(bool)),
	}
	req.ConfigurationManager = &opsworks.StackConfigurationManager{
		Name:    aws.String(d.Get("configuration_manager_name").(string)),
		Version: aws.String(d.Get("configuration_manager_version").(string)),
	}

	log.Printf("[DEBUG] Updating OpsWorks stack: %s", d.Id())

	_, err = client.UpdateStack(req)
	if err != nil {
		return err
	}

	return resourceAwsOpsworksStackRead(d, meta)
}

func resourceAwsOpsworksStackDelete(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*AWSClient).opsworksconn

	req := &opsworks.DeleteStackInput{
		StackId: aws.String(d.Id()),
	}

	log.Printf("[DEBUG] Deleting OpsWorks stack: %s", d.Id())

	_, err := client.DeleteStack(req)
	if err != nil {
		return err
	}

	// For a stack in a VPC, OpsWorks has created some default security groups
	// in the VPC, which it will now delete.
	// Unfortunately, the security groups are deleted asynchronously and there
	// is no robust way for us to determine when it is done. The VPC itself
	// isn't deletable until the security groups are cleaned up, so this could
	// make 'terraform destroy' fail if the VPC is also managed and we don't
	// wait for the security groups to be deleted. Since there is no robust
	// way to check for completion, we just wait a nominal amount of time.
	if _, ok := d.GetOk("vpc_id"); ok {
		log.Print("[INFO] Waiting for OpsWorks built-in security groups to be deleted")
		time.Sleep(30 * time.Second)
	}

	return nil
}
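
To make the rule in resourceAwsOpsworksStackValidate concrete, it admits exactly two shapes of stack. Hedged sketches of both follow (variable names are hypothetical; all attribute names come from the schema above):

# VPC-based stack: setting vpc_id requires default_subnet_id.
resource "aws_opsworks_stack" "vpc" {
  name                         = "vpc-stack"
  region                       = "us-west-2"
  service_role_arn             = "${var.service_role_arn}"
  default_instance_profile_arn = "${var.instance_profile_arn}"
  vpc_id                       = "${var.vpc_id}"
  default_subnet_id            = "${var.subnet_id}"
}

# Classic stack: no vpc_id, so default_availability_zone must be set instead.
resource "aws_opsworks_stack" "classic" {
  name                         = "classic-stack"
  region                       = "us-west-2"
  service_role_arn             = "${var.service_role_arn}"
  default_instance_profile_arn = "${var.instance_profile_arn}"
  default_availability_zone    = "us-west-2a"
}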

@@ -0,0 +1,353 @@
package aws

import (
	"fmt"
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iam"
	"github.com/aws/aws-sdk-go/service/opsworks"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

// These tests assume the existence of predefined OpsWorks IAM roles named
// `aws-opsworks-ec2-role` and `aws-opsworks-service-role`.

///////////////////////////////
//// Tests for the No-VPC case
///////////////////////////////

var testAccAwsOpsworksStackConfigNoVpcCreate = `
resource "aws_opsworks_stack" "tf-acc" {
  name = "tf-opsworks-acc"
  region = "us-west-2"
  service_role_arn = "%s"
  default_instance_profile_arn = "%s"
  default_availability_zone = "us-west-2a"
  default_os = "Amazon Linux 2014.09"
  default_root_device_type = "ebs"
  custom_json = "{\"key\": \"value\"}"
  configuration_manager_version = "11.10"
  use_opsworks_security_groups = false
}
`
var testAccAWSOpsworksStackConfigNoVpcUpdate = `
resource "aws_opsworks_stack" "tf-acc" {
  name = "tf-opsworks-acc"
  region = "us-west-2"
  service_role_arn = "%s"
  default_instance_profile_arn = "%s"
  default_availability_zone = "us-west-2a"
  default_os = "Amazon Linux 2014.09"
  default_root_device_type = "ebs"
  custom_json = "{\"key\": \"value\"}"
  configuration_manager_version = "11.10"
  use_opsworks_security_groups = false
  use_custom_cookbooks = true
  manage_berkshelf = true
  custom_cookbooks_source {
    type = "git"
    revision = "master"
    url = "https://github.com/awslabs/opsworks-example-cookbooks.git"
  }
}
`

func TestAccAwsOpsworksStackNoVpc(t *testing.T) {
	opsiam := testAccAwsOpsworksStackIam{}
	testAccAwsOpsworksStackPopulateIam(t, &opsiam)

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAwsOpsworksStackDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: fmt.Sprintf(testAccAwsOpsworksStackConfigNoVpcCreate, opsiam.ServiceRoleArn, opsiam.InstanceProfileArn),
				Check:  testAccAwsOpsworksStackCheckResourceAttrsCreate,
			},
			resource.TestStep{
				Config: fmt.Sprintf(testAccAWSOpsworksStackConfigNoVpcUpdate, opsiam.ServiceRoleArn, opsiam.InstanceProfileArn),
				Check:  testAccAwsOpsworksStackCheckResourceAttrsUpdate,
			},
		},
	})
}

////////////////////////////
//// Tests for the VPC case
////////////////////////////

var testAccAwsOpsworksStackConfigVpcCreate = `
resource "aws_vpc" "tf-acc" {
  cidr_block = "10.3.5.0/24"
}
resource "aws_subnet" "tf-acc" {
  vpc_id = "${aws_vpc.tf-acc.id}"
  cidr_block = "${aws_vpc.tf-acc.cidr_block}"
  availability_zone = "us-west-2a"
}
resource "aws_opsworks_stack" "tf-acc" {
  name = "tf-opsworks-acc"
  region = "us-west-2"
  vpc_id = "${aws_vpc.tf-acc.id}"
  default_subnet_id = "${aws_subnet.tf-acc.id}"
  service_role_arn = "%s"
  default_instance_profile_arn = "%s"
  default_os = "Amazon Linux 2014.09"
  default_root_device_type = "ebs"
  custom_json = "{\"key\": \"value\"}"
  configuration_manager_version = "11.10"
  use_opsworks_security_groups = false
}
`

var testAccAWSOpsworksStackConfigVpcUpdate = `
resource "aws_vpc" "tf-acc" {
  cidr_block = "10.3.5.0/24"
}
resource "aws_subnet" "tf-acc" {
  vpc_id = "${aws_vpc.tf-acc.id}"
  cidr_block = "${aws_vpc.tf-acc.cidr_block}"
  availability_zone = "us-west-2a"
}
resource "aws_opsworks_stack" "tf-acc" {
  name = "tf-opsworks-acc"
  region = "us-west-2"
  vpc_id = "${aws_vpc.tf-acc.id}"
  default_subnet_id = "${aws_subnet.tf-acc.id}"
  service_role_arn = "%s"
  default_instance_profile_arn = "%s"
  default_os = "Amazon Linux 2014.09"
  default_root_device_type = "ebs"
  custom_json = "{\"key\": \"value\"}"
  configuration_manager_version = "11.10"
  use_opsworks_security_groups = false
  use_custom_cookbooks = true
  manage_berkshelf = true
  custom_cookbooks_source {
    type = "git"
    revision = "master"
    url = "https://github.com/awslabs/opsworks-example-cookbooks.git"
  }
}
`

func TestAccAwsOpsworksStackVpc(t *testing.T) {
	opsiam := testAccAwsOpsworksStackIam{}
	testAccAwsOpsworksStackPopulateIam(t, &opsiam)

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAwsOpsworksStackDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: fmt.Sprintf(testAccAwsOpsworksStackConfigVpcCreate, opsiam.ServiceRoleArn, opsiam.InstanceProfileArn),
				Check:  testAccAwsOpsworksStackCheckResourceAttrsCreate,
			},
			resource.TestStep{
				Config: fmt.Sprintf(testAccAWSOpsworksStackConfigVpcUpdate, opsiam.ServiceRoleArn, opsiam.InstanceProfileArn),
				Check: resource.ComposeTestCheckFunc(
					testAccAwsOpsworksStackCheckResourceAttrsUpdate,
					testAccAwsOpsworksCheckVpc,
				),
			},
		},
	})
}

////////////////////////////
//// Checkers and Utilities
////////////////////////////

var testAccAwsOpsworksStackCheckResourceAttrsCreate = resource.ComposeTestCheckFunc(
	resource.TestCheckResourceAttr(
		"aws_opsworks_stack.tf-acc",
		"name",
		"tf-opsworks-acc",
	),
	resource.TestCheckResourceAttr(
		"aws_opsworks_stack.tf-acc",
		"default_availability_zone",
		"us-west-2a",
	),
	resource.TestCheckResourceAttr(
		"aws_opsworks_stack.tf-acc",
		"default_os",
		"Amazon Linux 2014.09",
	),
	resource.TestCheckResourceAttr(
		"aws_opsworks_stack.tf-acc",
		"default_root_device_type",
		"ebs",
	),
	resource.TestCheckResourceAttr(
		"aws_opsworks_stack.tf-acc",
		"custom_json",
		`{"key": "value"}`,
	),
	resource.TestCheckResourceAttr(
		"aws_opsworks_stack.tf-acc",
		"configuration_manager_version",
		"11.10",
	),
	resource.TestCheckResourceAttr(
		"aws_opsworks_stack.tf-acc",
		"use_opsworks_security_groups",
		"false",
	),
)

var testAccAwsOpsworksStackCheckResourceAttrsUpdate = resource.ComposeTestCheckFunc(
	resource.TestCheckResourceAttr(
		"aws_opsworks_stack.tf-acc",
		"name",
		"tf-opsworks-acc",
	),
	resource.TestCheckResourceAttr(
		"aws_opsworks_stack.tf-acc",
		"default_availability_zone",
		"us-west-2a",
	),
	resource.TestCheckResourceAttr(
		"aws_opsworks_stack.tf-acc",
		"default_os",
		"Amazon Linux 2014.09",
	),
	resource.TestCheckResourceAttr(
		"aws_opsworks_stack.tf-acc",
		"default_root_device_type",
		"ebs",
	),
	resource.TestCheckResourceAttr(
		"aws_opsworks_stack.tf-acc",
		"custom_json",
		`{"key": "value"}`,
	),
	resource.TestCheckResourceAttr(
		"aws_opsworks_stack.tf-acc",
		"configuration_manager_version",
		"11.10",
	),
	resource.TestCheckResourceAttr(
		"aws_opsworks_stack.tf-acc",
		"use_opsworks_security_groups",
		"false",
	),
	resource.TestCheckResourceAttr(
		"aws_opsworks_stack.tf-acc",
		"use_custom_cookbooks",
		"true",
	),
	resource.TestCheckResourceAttr(
		"aws_opsworks_stack.tf-acc",
		"manage_berkshelf",
		"true",
	),
	resource.TestCheckResourceAttr(
		"aws_opsworks_stack.tf-acc",
		"custom_cookbooks_source.0.type",
		"git",
	),
	resource.TestCheckResourceAttr(
		"aws_opsworks_stack.tf-acc",
		"custom_cookbooks_source.0.revision",
		"master",
	),
	resource.TestCheckResourceAttr(
		"aws_opsworks_stack.tf-acc",
		"custom_cookbooks_source.0.url",
		"https://github.com/awslabs/opsworks-example-cookbooks.git",
	),
)

func testAccAwsOpsworksCheckVpc(s *terraform.State) error {
	rs, ok := s.RootModule().Resources["aws_opsworks_stack.tf-acc"]
	if !ok {
		return fmt.Errorf("Not found: %s", "aws_opsworks_stack.tf-acc")
	}
	if rs.Primary.ID == "" {
		return fmt.Errorf("No ID is set")
	}

	p := rs.Primary

	opsworksconn := testAccProvider.Meta().(*AWSClient).opsworksconn
	describeOpts := &opsworks.DescribeStacksInput{
		StackIds: []*string{aws.String(p.ID)},
	}
	resp, err := opsworksconn.DescribeStacks(describeOpts)
	if err != nil {
		return err
	}
	if len(resp.Stacks) == 0 {
		return fmt.Errorf("Stack %s not found", p.ID)
	}
	if p.Attributes["vpc_id"] != *resp.Stacks[0].VpcId {
		return fmt.Errorf("VPC ID: got %s, expected %s", *resp.Stacks[0].VpcId, p.Attributes["vpc_id"])
	}
	if p.Attributes["default_subnet_id"] != *resp.Stacks[0].DefaultSubnetId {
		return fmt.Errorf("Default subnet ID: got %s, expected %s", *resp.Stacks[0].DefaultSubnetId, p.Attributes["default_subnet_id"])
	}
	return nil
}

func testAccCheckAwsOpsworksStackDestroy(s *terraform.State) error {
	if len(s.RootModule().Resources) > 0 {
		return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
	}

	return nil
}

// Holds the two IAM object ARNs used in stack objects we'll create.
type testAccAwsOpsworksStackIam struct {
	ServiceRoleArn     string
	InstanceProfileArn string
}

func testAccAwsOpsworksStackPopulateIam(t *testing.T, opsiam *testAccAwsOpsworksStackIam) {
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccInstanceConfig_pre, // noop
				Check:  testAccCheckAwsOpsworksEnsureIam(t, opsiam),
			},
		},
	})
}

func testAccCheckAwsOpsworksEnsureIam(t *testing.T, opsiam *testAccAwsOpsworksStackIam) func(*terraform.State) error {
	return func(_ *terraform.State) error {
		iamconn := testAccProvider.Meta().(*AWSClient).iamconn

		serviceRoleOpts := &iam.GetRoleInput{
			RoleName: aws.String("aws-opsworks-service-role"),
		}
		respServiceRole, err := iamconn.GetRole(serviceRoleOpts)
		if err != nil {
			return err
		}

		instanceProfileOpts := &iam.GetInstanceProfileInput{
			InstanceProfileName: aws.String("aws-opsworks-ec2-role"),
		}
		respInstanceProfile, err := iamconn.GetInstanceProfile(instanceProfileOpts)
		if err != nil {
			return err
		}

		opsiam.ServiceRoleArn = *respServiceRole.Role.Arn
		opsiam.InstanceProfileArn = *respInstanceProfile.InstanceProfile.Arn

		t.Logf("[DEBUG] ServiceRoleARN for OpsWorks: %s", opsiam.ServiceRoleArn)
		t.Logf("[DEBUG] Instance Profile ARN for OpsWorks: %s", opsiam.InstanceProfileArn)

		return nil
	}
}

@@ -0,0 +1,16 @@
package aws

import (
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsOpsworksStaticWebLayer() *schema.Resource {
	layerType := &opsworksLayerType{
		TypeName:         "web",
		DefaultLayerName: "Static Web Server",

		Attributes: map[string]*opsworksLayerTypeAttribute{},
	}

	return layerType.SchemaResource()
}

@@ -0,0 +1,150 @@
package aws

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsPlacementGroup() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsPlacementGroupCreate,
		Read:   resourceAwsPlacementGroupRead,
		Delete: resourceAwsPlacementGroupDelete,

		Schema: map[string]*schema.Schema{
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"strategy": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
		},
	}
}

func resourceAwsPlacementGroupCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	name := d.Get("name").(string)
	input := ec2.CreatePlacementGroupInput{
		GroupName: aws.String(name),
		Strategy:  aws.String(d.Get("strategy").(string)),
	}
	log.Printf("[DEBUG] Creating EC2 Placement group: %s", input)
	_, err := conn.CreatePlacementGroup(&input)
	if err != nil {
		return err
	}

	wait := resource.StateChangeConf{
		Pending:    []string{"pending"},
		Target:     "available",
		Timeout:    5 * time.Minute,
		MinTimeout: 1 * time.Second,
		Refresh: func() (interface{}, string, error) {
			out, err := conn.DescribePlacementGroups(&ec2.DescribePlacementGroupsInput{
				GroupNames: []*string{aws.String(name)},
			})

			if err != nil {
				return out, "", err
			}

			if len(out.PlacementGroups) == 0 {
				return out, "", fmt.Errorf("Placement group not found (%q)", name)
			}
			pg := out.PlacementGroups[0]

			return out, *pg.State, nil
		},
	}

	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] EC2 Placement group created: %q", name)

	d.SetId(name)

	return resourceAwsPlacementGroupRead(d, meta)
}

func resourceAwsPlacementGroupRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn
	input := ec2.DescribePlacementGroupsInput{
		GroupNames: []*string{aws.String(d.Get("name").(string))},
	}
	out, err := conn.DescribePlacementGroups(&input)
	if err != nil {
		return err
	}
	pg := out.PlacementGroups[0]

	log.Printf("[DEBUG] Received EC2 Placement Group: %s", pg)

	d.Set("name", pg.GroupName)
	d.Set("strategy", pg.Strategy)

	return nil
}

func resourceAwsPlacementGroupDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	log.Printf("[DEBUG] Deleting EC2 Placement Group %q", d.Id())
	_, err := conn.DeletePlacementGroup(&ec2.DeletePlacementGroupInput{
		GroupName: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}

	wait := resource.StateChangeConf{
		Pending:    []string{"deleting"},
		Target:     "deleted",
		Timeout:    5 * time.Minute,
		MinTimeout: 1 * time.Second,
		Refresh: func() (interface{}, string, error) {
			out, err := conn.DescribePlacementGroups(&ec2.DescribePlacementGroupsInput{
				GroupNames: []*string{aws.String(d.Id())},
			})

			if err != nil {
				// Guard the type assertion so unexpected error types
				// don't panic the refresh loop.
				awsErr, ok := err.(awserr.Error)
				if !ok {
					return out, "", err
				}
				if awsErr.Code() == "InvalidPlacementGroup.Unknown" {
					return out, "deleted", nil
				}
				return out, "", awsErr
			}

			if len(out.PlacementGroups) == 0 {
				return out, "deleted", nil
			}

			pg := out.PlacementGroups[0]

			return out, *pg.State, nil
		},
	}

	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	d.SetId("")
	return nil
}
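
Minimal usage, for reference. Both attribute names come from the schema above; valid strategy values are defined by EC2 rather than validated here, and "cluster" is the strategy the acceptance test below also uses.

resource "aws_placement_group" "example" {
  name     = "example-pg"
  strategy = "cluster"
}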

@@ -0,0 +1,98 @@
package aws

import (
	"fmt"
	"testing"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func TestAccAWSPlacementGroup_basic(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSPlacementGroupDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSPlacementGroupConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSPlacementGroupExists("aws_placement_group.pg"),
				),
			},
		},
	})
}

func testAccCheckAWSPlacementGroupDestroy(s *terraform.State) error {
	conn := testAccProvider.Meta().(*AWSClient).ec2conn

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_placement_group" {
			continue
		}
		_, err := conn.DeletePlacementGroup(&ec2.DeletePlacementGroupInput{
			GroupName: aws.String(rs.Primary.ID),
		})
		if err != nil {
			return err
		}
	}
	return nil
}

func testAccCheckAWSPlacementGroupExists(n string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No Placement Group ID is set")
		}

		conn := testAccProvider.Meta().(*AWSClient).ec2conn
		_, err := conn.DescribePlacementGroups(&ec2.DescribePlacementGroupsInput{
			GroupNames: []*string{aws.String(rs.Primary.ID)},
		})

		if err != nil {
			return fmt.Errorf("Placement Group error: %v", err)
		}
		return nil
	}
}

func testAccCheckAWSDestroyPlacementGroup(n string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No Placement Group ID is set")
		}

		conn := testAccProvider.Meta().(*AWSClient).ec2conn
		_, err := conn.DeletePlacementGroup(&ec2.DeletePlacementGroupInput{
			GroupName: aws.String(rs.Primary.ID),
		})

		if err != nil {
			return fmt.Errorf("Error destroying Placement Group (%s): %s", rs.Primary.ID, err)
		}
		return nil
	}
}

var testAccAWSPlacementGroupConfig = `
resource "aws_placement_group" "pg" {
  name = "tf-test-pg"
  strategy = "cluster"
}
`

@@ -0,0 +1,347 @@
package aws

import (
	"fmt"
	"log"
	"regexp"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/rds"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsRDSCluster() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsRDSClusterCreate,
		Read:   resourceAwsRDSClusterRead,
		Update: resourceAwsRDSClusterUpdate,
		Delete: resourceAwsRDSClusterDelete,

		Schema: map[string]*schema.Schema{

			"availability_zones": &schema.Schema{
				Type:     schema.TypeSet,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Optional: true,
				ForceNew: true,
				Computed: true,
				Set:      schema.HashString,
			},

			"cluster_identifier": &schema.Schema{
				Type:         schema.TypeString,
				Required:     true,
				ForceNew:     true,
				ValidateFunc: validateRdsId,
			},

			"cluster_members": &schema.Schema{
				Type:     schema.TypeSet,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Optional: true,
				Computed: true,
				Set:      schema.HashString,
			},

			"database_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			"db_subnet_group_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"endpoint": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"engine": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"final_snapshot_identifier": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
					value := v.(string)
					if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) {
						es = append(es, fmt.Errorf(
							"only alphanumeric characters and hyphens allowed in %q", k))
					}
					if regexp.MustCompile(`--`).MatchString(value) {
						es = append(es, fmt.Errorf("%q cannot contain two consecutive hyphens", k))
					}
					if regexp.MustCompile(`-$`).MatchString(value) {
						es = append(es, fmt.Errorf("%q cannot end in a hyphen", k))
					}
					return
				},
			},

			"master_username": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"master_password": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},

			"port": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
			},

			// apply_immediately is used to determine when the update
			// modifications take place.
			// See http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html
			"apply_immediately": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Computed: true,
			},

			"vpc_security_group_ids": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set:      schema.HashString,
			},
		},
	}
}

func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn

	createOpts := &rds.CreateDBClusterInput{
		DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)),
		Engine:              aws.String("aurora"),
		MasterUserPassword:  aws.String(d.Get("master_password").(string)),
		MasterUsername:      aws.String(d.Get("master_username").(string)),
	}

	if v := d.Get("database_name"); v.(string) != "" {
		createOpts.DatabaseName = aws.String(v.(string))
	}

	if attr, ok := d.GetOk("port"); ok {
		createOpts.Port = aws.Int64(int64(attr.(int)))
	}

	if attr, ok := d.GetOk("db_subnet_group_name"); ok {
		createOpts.DBSubnetGroupName = aws.String(attr.(string))
	}

	if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 {
		createOpts.VpcSecurityGroupIds = expandStringList(attr.List())
	}

	if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 {
		createOpts.AvailabilityZones = expandStringList(attr.List())
	}

	log.Printf("[DEBUG] RDS Cluster create options: %s", createOpts)
	resp, err := conn.CreateDBCluster(createOpts)
	if err != nil {
		log.Printf("[ERROR] Error creating RDS Cluster: %s", err)
		return err
	}

	log.Printf("[DEBUG] Cluster create response: %s", resp)
	d.SetId(*resp.DBCluster.DBClusterIdentifier)
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"creating", "backing-up", "modifying"},
		Target:     "available",
		Refresh:    resourceAwsRDSClusterStateRefreshFunc(d, meta),
		Timeout:    5 * time.Minute,
		MinTimeout: 3 * time.Second,
	}

	// Wait, catching any errors
	_, err = stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf("[WARN] Error waiting for RDS Cluster state to be \"available\": %s", err)
	}

	return resourceAwsRDSClusterRead(d, meta)
}

func resourceAwsRDSClusterRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn

	resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{
		DBClusterIdentifier: aws.String(d.Id()),
	})

	if err != nil {
		if awsErr, ok := err.(awserr.Error); ok {
			if awsErr.Code() == "DBClusterNotFoundFault" {
				d.SetId("")
				log.Printf("[DEBUG] RDS Cluster (%s) not found", d.Id())
				return nil
			}
		}
		log.Printf("[DEBUG] Error describing RDS Cluster (%s)", d.Id())
		return err
	}

	var dbc *rds.DBCluster
	for _, c := range resp.DBClusters {
		if *c.DBClusterIdentifier == d.Id() {
			dbc = c
		}
	}

	if dbc == nil {
		log.Printf("[WARN] RDS Cluster (%s) not found", d.Id())
		d.SetId("")
		return nil
	}

	if err := d.Set("availability_zones", aws.StringValueSlice(dbc.AvailabilityZones)); err != nil {
		return fmt.Errorf("[DEBUG] Error saving AvailabilityZones to state for RDS Cluster (%s): %s", d.Id(), err)
	}
	d.Set("database_name", dbc.DatabaseName)
	d.Set("db_subnet_group_name", dbc.DBSubnetGroup)
	d.Set("endpoint", dbc.Endpoint)
	d.Set("engine", dbc.Engine)
	d.Set("master_username", dbc.MasterUsername)
	d.Set("port", dbc.Port)

	var vpcg []string
	for _, g := range dbc.VpcSecurityGroups {
		vpcg = append(vpcg, *g.VpcSecurityGroupId)
	}
	if err := d.Set("vpc_security_group_ids", vpcg); err != nil {
		return fmt.Errorf("[DEBUG] Error saving VPC Security Group IDs to state for RDS Cluster (%s): %s", d.Id(), err)
	}

	var cm []string
	for _, m := range dbc.DBClusterMembers {
		cm = append(cm, *m.DBInstanceIdentifier)
	}
	if err := d.Set("cluster_members", cm); err != nil {
		return fmt.Errorf("[DEBUG] Error saving RDS Cluster Members to state for RDS Cluster (%s): %s", d.Id(), err)
	}

	return nil
}

func resourceAwsRDSClusterUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn

	req := &rds.ModifyDBClusterInput{
		ApplyImmediately:    aws.Bool(d.Get("apply_immediately").(bool)),
		DBClusterIdentifier: aws.String(d.Id()),
	}

	if d.HasChange("master_password") {
		req.MasterUserPassword = aws.String(d.Get("master_password").(string))
	}

	if d.HasChange("vpc_security_group_ids") {
		if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 {
			req.VpcSecurityGroupIds = expandStringList(attr.List())
		} else {
			req.VpcSecurityGroupIds = []*string{}
		}
	}

	_, err := conn.ModifyDBCluster(req)
	if err != nil {
		return fmt.Errorf("[WARN] Error modifying RDS Cluster (%s): %s", d.Id(), err)
	}

	return resourceAwsRDSClusterRead(d, meta)
}

func resourceAwsRDSClusterDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn
	log.Printf("[DEBUG] Destroying RDS Cluster (%s)", d.Id())

	deleteOpts := rds.DeleteDBClusterInput{
		DBClusterIdentifier: aws.String(d.Id()),
	}

	finalSnapshot := d.Get("final_snapshot_identifier").(string)
	if finalSnapshot == "" {
		deleteOpts.SkipFinalSnapshot = aws.Bool(true)
	} else {
		deleteOpts.FinalDBSnapshotIdentifier = aws.String(finalSnapshot)
		deleteOpts.SkipFinalSnapshot = aws.Bool(false)
	}

	log.Printf("[DEBUG] RDS Cluster delete options: %s", deleteOpts)
	_, err := conn.DeleteDBCluster(&deleteOpts)
	if err != nil {
		return err
	}

	stateConf := &resource.StateChangeConf{
		Pending:    []string{"deleting", "backing-up", "modifying"},
		Target:     "destroyed",
		Refresh:    resourceAwsRDSClusterStateRefreshFunc(d, meta),
		Timeout:    5 * time.Minute,
		MinTimeout: 3 * time.Second,
	}

	// Wait, catching any errors
	_, err = stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf("[WARN] Error deleting RDS Cluster (%s): %s", d.Id(), err)
	}

	return nil
}

func resourceAwsRDSClusterStateRefreshFunc(
	d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		conn := meta.(*AWSClient).rdsconn

		resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{
			DBClusterIdentifier: aws.String(d.Id()),
		})

		if err != nil {
			if awsErr, ok := err.(awserr.Error); ok {
				if awsErr.Code() == "DBClusterNotFoundFault" {
					return 42, "destroyed", nil
				}
			}
			log.Printf("[WARN] Error on retrieving DB Cluster (%s) when waiting: %s", d.Id(), err)
			return nil, "", err
		}

		var dbc *rds.DBCluster

		for _, c := range resp.DBClusters {
			if *c.DBClusterIdentifier == d.Id() {
				dbc = c
			}
		}

		if dbc == nil {
			return 42, "destroyed", nil
		}

		if dbc.Status != nil {
			log.Printf("[DEBUG] DB Cluster status (%s): %s", d.Id(), *dbc.Status)
		}

		return dbc, *dbc.Status, nil
	}
}
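
An illustrative configuration for this resource follows. The create function above hard-codes the engine to "aurora", so no engine argument is exposed; every attribute used here comes from the schema above, and the identifiers and password variable are hypothetical.

resource "aws_rds_cluster" "default" {
  cluster_identifier        = "aurora-cluster-demo"
  availability_zones        = ["us-west-2a", "us-west-2b"]
  database_name             = "mydb"
  master_username           = "admin"
  master_password           = "${var.db_password}"  # hypothetical variable
  final_snapshot_identifier = "aurora-cluster-demo-final"
}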
|
|
@ -0,0 +1,220 @@
|
||||||
|
package aws
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/service/rds"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
func resourceAwsRDSClusterInstance() *schema.Resource {
|
||||||
|
return &schema.Resource{
|
||||||
|
Create: resourceAwsRDSClusterInstanceCreate,
|
||||||
|
Read: resourceAwsRDSClusterInstanceRead,
|
||||||
|
Update: resourceAwsRDSClusterInstanceUpdate,
|
||||||
|
		Delete: resourceAwsRDSClusterInstanceDelete,

		Schema: map[string]*schema.Schema{
			"identifier": &schema.Schema{
				Type:         schema.TypeString,
				Optional:     true,
				ForceNew:     true,
				ValidateFunc: validateRdsId,
			},

			"db_subnet_group_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"writer": &schema.Schema{
				Type:     schema.TypeBool,
				Computed: true,
			},

			"cluster_identifier": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"endpoint": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"port": &schema.Schema{
				Type:     schema.TypeInt,
				Computed: true,
			},

			"publicly_accessible": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
				ForceNew: true,
			},

			"instance_class": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"tags": tagsSchema(),
		},
	}
}

func resourceAwsRDSClusterInstanceCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn
	tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))

	createOpts := &rds.CreateDBInstanceInput{
		DBInstanceClass:     aws.String(d.Get("instance_class").(string)),
		DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)),
		Engine:              aws.String("aurora"),
		PubliclyAccessible:  aws.Bool(d.Get("publicly_accessible").(bool)),
		Tags:                tags,
	}

	if v := d.Get("identifier").(string); v != "" {
		createOpts.DBInstanceIdentifier = aws.String(v)
	} else {
		createOpts.DBInstanceIdentifier = aws.String(resource.UniqueId())
	}

	if attr, ok := d.GetOk("db_subnet_group_name"); ok {
		createOpts.DBSubnetGroupName = aws.String(attr.(string))
	}

	log.Printf("[DEBUG] Creating RDS DB Instance opts: %s", createOpts)
	resp, err := conn.CreateDBInstance(createOpts)
	if err != nil {
		return err
	}

	d.SetId(*resp.DBInstance.DBInstanceIdentifier)

	// reuse db_instance refresh func
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"creating", "backing-up", "modifying"},
		Target:     "available",
		Refresh:    resourceAwsDbInstanceStateRefreshFunc(d, meta),
		Timeout:    40 * time.Minute,
		MinTimeout: 10 * time.Second,
		Delay:      10 * time.Second,
	}

	// Wait, catching any errors
	_, err = stateConf.WaitForState()
	if err != nil {
		return err
	}

	return resourceAwsRDSClusterInstanceRead(d, meta)
}

func resourceAwsRDSClusterInstanceRead(d *schema.ResourceData, meta interface{}) error {
	db, err := resourceAwsDbInstanceRetrieve(d, meta)
	if err != nil {
		log.Printf("[WARN] Error on retrieving RDS Cluster Instance (%s): %s", d.Id(), err)
		d.SetId("")
		return nil
	}

	// Retrieve DB Cluster information, to determine if this Instance is a writer
	conn := meta.(*AWSClient).rdsconn
	resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{
		DBClusterIdentifier: db.DBClusterIdentifier,
	})

	var dbc *rds.DBCluster
	for _, c := range resp.DBClusters {
		if *c.DBClusterIdentifier == *db.DBClusterIdentifier {
			dbc = c
		}
	}

	if dbc == nil {
		return fmt.Errorf("[WARN] Error finding RDS Cluster (%s) for Cluster Instance (%s): %s",
			*db.DBClusterIdentifier, *db.DBInstanceIdentifier, err)
	}

	for _, m := range dbc.DBClusterMembers {
		if *db.DBInstanceIdentifier == *m.DBInstanceIdentifier {
			if *m.IsClusterWriter == true {
				d.Set("writer", true)
			} else {
				d.Set("writer", false)
			}
		}
	}

	if db.Endpoint != nil {
		d.Set("endpoint", db.Endpoint.Address)
		d.Set("port", db.Endpoint.Port)
	}

	d.Set("publicly_accessible", db.PubliclyAccessible)

	// Fetch and save tags
	arn, err := buildRDSARN(d, meta)
	if err != nil {
		log.Printf("[DEBUG] Error building ARN for RDS Cluster Instance (%s), not setting Tags", *db.DBInstanceIdentifier)
	} else {
		if err := saveTagsRDS(conn, d, arn); err != nil {
			log.Printf("[WARN] Failed to save tags for RDS Cluster Instance (%s): %s", *db.DBClusterIdentifier, err)
		}
	}

	return nil
}

func resourceAwsRDSClusterInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn

	if arn, err := buildRDSARN(d, meta); err == nil {
		if err := setTagsRDS(conn, d, arn); err != nil {
			return err
		}
	}

	return resourceAwsRDSClusterInstanceRead(d, meta)
}

func resourceAwsRDSClusterInstanceDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn

	log.Printf("[DEBUG] RDS Cluster Instance destroy: %v", d.Id())

	opts := rds.DeleteDBInstanceInput{DBInstanceIdentifier: aws.String(d.Id())}

	log.Printf("[DEBUG] RDS Cluster Instance destroy configuration: %s", opts)
	if _, err := conn.DeleteDBInstance(&opts); err != nil {
		return err
	}

	// re-uses db_instance refresh func
	log.Println("[INFO] Waiting for RDS Cluster Instance to be destroyed")
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"modifying", "deleting"},
		Target:     "",
		Refresh:    resourceAwsDbInstanceStateRefreshFunc(d, meta),
		Timeout:    40 * time.Minute,
		MinTimeout: 10 * time.Second,
	}

	if _, err := stateConf.WaitForState(); err != nil {
		return err
	}

	return nil
}
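
The create and delete paths above both lean on resource.StateChangeConf with a borrowed refresh function. Below is a minimal standalone sketch of the StateRefreshFunc contract being polled; the type is redeclared locally, and fakeDescribe is a hypothetical stand-in for a DescribeDBInstances call, not provider code.

    package main

    import "fmt"

    // StateRefreshFunc mirrors helper/resource.StateRefreshFunc: return the
    // current object, a state string, and an error. A nil object with an
    // empty state tells the waiter the resource is gone, which is why the
    // delete path above can wait on Target: "".
    type StateRefreshFunc func() (interface{}, string, error)

    // fakeDescribe stands in for a real DescribeDBInstances call (hypothetical).
    func fakeDescribe(id string) (string, bool) { return "available", true }

    func refreshFor(id string) StateRefreshFunc {
        return func() (interface{}, string, error) {
            status, found := fakeDescribe(id)
            if !found {
                return nil, "", nil // empty state signals "deleted" to the waiter
            }
            return status, status, nil
        }
    }

    func main() {
        obj, state, err := refreshFor("db-1")()
        fmt.Println(obj, state, err)
    }
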
@@ -0,0 +1,134 @@
package aws

import (
	"fmt"
	"math/rand"
	"strings"
	"testing"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/rds"
)

func TestAccAWSRDSClusterInstance_basic(t *testing.T) {
	var v rds.DBInstance

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSClusterDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSClusterInstanceConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSClusterInstanceExists("aws_rds_cluster_instance.cluster_instances", &v),
					testAccCheckAWSDBClusterInstanceAttributes(&v),
				),
			},
		},
	})
}

func testAccCheckAWSClusterInstanceDestroy(s *terraform.State) error {
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_rds_cluster" {
			continue
		}

		// Try to find the Group
		conn := testAccProvider.Meta().(*AWSClient).rdsconn
		var err error
		resp, err := conn.DescribeDBInstances(
			&rds.DescribeDBInstancesInput{
				DBInstanceIdentifier: aws.String(rs.Primary.ID),
			})

		if err == nil {
			if len(resp.DBInstances) != 0 &&
				*resp.DBInstances[0].DBInstanceIdentifier == rs.Primary.ID {
				return fmt.Errorf("DB Cluster Instance %s still exists", rs.Primary.ID)
			}
		}

		// Return nil if the Cluster Instance is already destroyed
		if awsErr, ok := err.(awserr.Error); ok {
			if awsErr.Code() == "DBInstanceNotFound" {
				return nil
			}
		}

		return err
	}

	return nil
}

func testAccCheckAWSDBClusterInstanceAttributes(v *rds.DBInstance) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if *v.Engine != "aurora" {
			return fmt.Errorf("bad engine, expected \"aurora\": %#v", *v.Engine)
		}

		if !strings.HasPrefix(*v.DBClusterIdentifier, "tf-aurora-cluster") {
			return fmt.Errorf("Bad Cluster Identifier prefix:\nexpected: %s\ngot: %s", "tf-aurora-cluster", *v.DBClusterIdentifier)
		}

		return nil
	}
}

func testAccCheckAWSClusterInstanceExists(n string, v *rds.DBInstance) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No DB Instance ID is set")
		}

		conn := testAccProvider.Meta().(*AWSClient).rdsconn
		resp, err := conn.DescribeDBInstances(&rds.DescribeDBInstancesInput{
			DBInstanceIdentifier: aws.String(rs.Primary.ID),
		})

		if err != nil {
			return err
		}

		for _, d := range resp.DBInstances {
			if *d.DBInstanceIdentifier == rs.Primary.ID {
				*v = *d
				return nil
			}
		}

		return fmt.Errorf("DB Cluster (%s) not found", rs.Primary.ID)
	}
}

// Add some randomness to the name, to avoid collisions
var testAccAWSClusterInstanceConfig = fmt.Sprintf(`
resource "aws_rds_cluster" "default" {
  cluster_identifier = "tf-aurora-cluster-test-%d"
  availability_zones = ["us-west-2a","us-west-2b","us-west-2c"]
  database_name = "mydb"
  master_username = "foo"
  master_password = "mustbeeightcharaters"
}

resource "aws_rds_cluster_instance" "cluster_instances" {
  identifier = "aurora-cluster-test-instance"
  cluster_identifier = "${aws_rds_cluster.default.id}"
  instance_class = "db.r3.large"
}
`, rand.New(rand.NewSource(time.Now().UnixNano())).Int())
@@ -0,0 +1,108 @@
package aws

import (
	"fmt"
	"math/rand"
	"testing"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/rds"
)

func TestAccAWSRDSCluster_basic(t *testing.T) {
	var v rds.DBCluster

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSClusterDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSClusterConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSClusterExists("aws_rds_cluster.default", &v),
				),
			},
		},
	})
}

func testAccCheckAWSClusterDestroy(s *terraform.State) error {
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_rds_cluster" {
			continue
		}

		// Try to find the Group
		conn := testAccProvider.Meta().(*AWSClient).rdsconn
		var err error
		resp, err := conn.DescribeDBClusters(
			&rds.DescribeDBClustersInput{
				DBClusterIdentifier: aws.String(rs.Primary.ID),
			})

		if err == nil {
			if len(resp.DBClusters) != 0 &&
				*resp.DBClusters[0].DBClusterIdentifier == rs.Primary.ID {
				return fmt.Errorf("DB Cluster %s still exists", rs.Primary.ID)
			}
		}

		// Return nil if the cluster is already destroyed
		if awsErr, ok := err.(awserr.Error); ok {
			if awsErr.Code() == "DBClusterNotFound" {
				return nil
			}
		}

		return err
	}

	return nil
}

func testAccCheckAWSClusterExists(n string, v *rds.DBCluster) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No DB Instance ID is set")
		}

		conn := testAccProvider.Meta().(*AWSClient).rdsconn
		resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{
			DBClusterIdentifier: aws.String(rs.Primary.ID),
		})

		if err != nil {
			return err
		}

		for _, c := range resp.DBClusters {
			if *c.DBClusterIdentifier == rs.Primary.ID {
				*v = *c
				return nil
			}
		}

		return fmt.Errorf("DB Cluster (%s) not found", rs.Primary.ID)
	}
}

// Add some randomness to the name, to avoid collisions
var testAccAWSClusterConfig = fmt.Sprintf(`
resource "aws_rds_cluster" "default" {
  cluster_identifier = "tf-aurora-cluster-%d"
  availability_zones = ["us-west-2a","us-west-2b","us-west-2c"]
  database_name = "mydb"
  master_username = "foo"
  master_password = "mustbeeightcharaters"
}`, rand.New(rand.NewSource(time.Now().UnixNano())).Int())
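
Both acceptance-test configs in these new files splice a random integer into the cluster identifier so concurrent test runs don't collide on AWS-side names. A minimal sketch of the same trick in isolation:

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    func main() {
        // seed from the clock so each test run gets a fresh identifier
        n := rand.New(rand.NewSource(time.Now().UnixNano())).Int()
        fmt.Printf("tf-aurora-cluster-test-%d\n", n)
    }
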
@@ -2,6 +2,7 @@ package aws

 import (
 	"fmt"
+	"os"
 	"testing"

 	"github.com/aws/aws-sdk-go/aws"

@@ -212,12 +213,16 @@ func testAccCheckRouteTableExists(n string, v *ec2.RouteTable) resource.TestChec
 	}
 }

-// TODO: re-enable this test.
 // VPC Peering connections are prefixed with pcx
 // Right now there is no VPC Peering resource
-func _TestAccAWSRouteTable_vpcPeering(t *testing.T) {
+func TestAccAWSRouteTable_vpcPeering(t *testing.T) {
 	var v ec2.RouteTable
+
+	acctId := os.Getenv("TF_ACC_ID")
+	if acctId == "" && os.Getenv(resource.TestEnvVar) != "" {
+		t.Fatal("Error: Test TestAccAWSRouteTable_vpcPeering requires an Account ID in TF_ACC_ID ")
+	}

 	testCheck := func(*terraform.State) error {
 		if len(v.Routes) != 2 {
 			return fmt.Errorf("bad routes: %#v", v.Routes)

@@ -243,7 +248,7 @@ func _TestAccAWSRouteTable_vpcPeering(t *testing.T) {
 		CheckDestroy: testAccCheckRouteTableDestroy,
 		Steps: []resource.TestStep{
 			resource.TestStep{
-				Config: testAccRouteTableVpcPeeringConfig,
+				Config: testAccRouteTableVpcPeeringConfig(acctId),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckRouteTableExists(
 						"aws_route_table.foo", &v),

@@ -395,11 +400,10 @@ resource "aws_route_table" "foo" {
 }
 `

-// TODO: re-enable this test.
 // VPC Peering connections are prefixed with pcx
-// Right now there is no VPC Peering resource
-const testAccRouteTableVpcPeeringConfig = `
-resource "aws_vpc" "foo" {
+// This test requires an ENV var, TF_ACC_ID, with a valid AWS Account ID
+func testAccRouteTableVpcPeeringConfig(acc string) string {
+	cfg := `resource "aws_vpc" "foo" {
 	cidr_block = "10.1.0.0/16"
 }

@@ -407,15 +411,34 @@ resource "aws_internet_gateway" "foo" {
 	vpc_id = "${aws_vpc.foo.id}"
 }

+resource "aws_vpc" "bar" {
+	cidr_block = "10.3.0.0/16"
+}
+
+resource "aws_internet_gateway" "bar" {
+	vpc_id = "${aws_vpc.bar.id}"
+}
+
+resource "aws_vpc_peering_connection" "foo" {
+	vpc_id = "${aws_vpc.foo.id}"
+	peer_vpc_id = "${aws_vpc.bar.id}"
+	peer_owner_id = "%s"
+	tags {
+		foo = "bar"
+	}
+}
+
 resource "aws_route_table" "foo" {
 	vpc_id = "${aws_vpc.foo.id}"

 	route {
 		cidr_block = "10.2.0.0/16"
-		vpc_peering_connection_id = "pcx-12345"
+		vpc_peering_connection_id = "${aws_vpc_peering_connection.foo.id}"
 	}
 }
 `
+
+	return fmt.Sprintf(cfg, acc)
+}

 const testAccRouteTableVgwRoutePropagationConfig = `
 resource "aws_vpc" "foo" {
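
The route table change above turns a config constant into a function so a real AWS account id, supplied via TF_ACC_ID, can be spliced into the peering block at test time. A sketch of that guard-then-template pattern; configFor and its one-line template are illustrative only:

    package main

    import (
        "fmt"
        "os"
    )

    // configFor mirrors the pattern: the HCL is a template with a %s hole
    // for the externally-supplied account id.
    func configFor(acc string) string {
        return fmt.Sprintf(`peer_owner_id = "%s"`, acc)
    }

    func main() {
        acc := os.Getenv("TF_ACC_ID")
        if acc == "" {
            fmt.Println("TF_ACC_ID not set; a real test would t.Fatal here")
            return
        }
        fmt.Println(configFor(acc))
    }
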
@@ -464,6 +464,9 @@ func resourceAwsS3BucketWebsiteDelete(s3conn *s3.S3, d *schema.ResourceData) error {
 		return fmt.Errorf("Error deleting S3 website: %s", err)
 	}

+	d.Set("website_endpoint", "")
+	d.Set("website_domain", "")
+
 	return nil
 }
@@ -1,7 +1,9 @@
 package aws

 import (
+	"bytes"
 	"fmt"
+	"io"
 	"log"
 	"os"

@@ -16,7 +18,6 @@ func resourceAwsS3BucketObject() *schema.Resource {
 	return &schema.Resource{
 		Create: resourceAwsS3BucketObjectPut,
 		Read:   resourceAwsS3BucketObjectRead,
-		Update: resourceAwsS3BucketObjectPut,
 		Delete: resourceAwsS3BucketObjectDelete,

 		Schema: map[string]*schema.Schema{

@@ -26,6 +27,37 @@ func resourceAwsS3BucketObject() *schema.Resource {
 				ForceNew: true,
 			},

+			"cache_control": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+
+			"content_disposition": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+
+			"content_encoding": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+
+			"content_language": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+
+			"content_type": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+				Computed: true,
+			},
+
 			"key": &schema.Schema{
 				Type:     schema.TypeString,
 				Required: true,

@@ -34,8 +66,16 @@ func resourceAwsS3BucketObject() *schema.Resource {

 			"source": &schema.Schema{
 				Type:     schema.TypeString,
-				Required: true,
+				Optional: true,
 				ForceNew: true,
+				ConflictsWith: []string{"content"},
+			},
+
+			"content": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+				ConflictsWith: []string{"source"},
 			},

 			"etag": &schema.Schema{

@@ -51,21 +91,50 @@ func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) error {

 	bucket := d.Get("bucket").(string)
 	key := d.Get("key").(string)
-	source := d.Get("source").(string)
-	file, err := os.Open(source)
-
-	if err != nil {
-		return fmt.Errorf("Error opening S3 bucket object source (%s): %s", source, err)
-	}
-
-	resp, err := s3conn.PutObject(
-		&s3.PutObjectInput{
-			Bucket: aws.String(bucket),
-			Key:    aws.String(key),
-			Body:   file,
-		})
+	var body io.ReadSeeker
+
+	if v, ok := d.GetOk("source"); ok {
+		source := v.(string)
+		file, err := os.Open(source)
+
+		if err != nil {
+			return fmt.Errorf("Error opening S3 bucket object source (%s): %s", source, err)
+		}
+
+		body = file
+	} else if v, ok := d.GetOk("content"); ok {
+		content := v.(string)
+		body = bytes.NewReader([]byte(content))
+	} else {
+		return fmt.Errorf("Must specify \"source\" or \"content\" field")
+	}
+
+	putInput := &s3.PutObjectInput{
+		Bucket: aws.String(bucket),
+		Key:    aws.String(key),
+		Body:   body,
+	}
+
+	if v, ok := d.GetOk("cache_control"); ok {
+		putInput.CacheControl = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("content_type"); ok {
+		putInput.ContentType = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("content_encoding"); ok {
+		putInput.ContentEncoding = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("content_language"); ok {
+		putInput.ContentLanguage = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("content_disposition"); ok {
+		putInput.ContentDisposition = aws.String(v.(string))
+	}
+
+	resp, err := s3conn.PutObject(putInput)
 	if err != nil {
 		return fmt.Errorf("Error putting object in S3 bucket (%s): %s", bucket, err)
 	}

@@ -99,6 +168,12 @@ func resourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) error {
 		return err
 	}

+	d.Set("cache_control", resp.CacheControl)
+	d.Set("content_disposition", resp.ContentDisposition)
+	d.Set("content_encoding", resp.ContentEncoding)
+	d.Set("content_language", resp.ContentLanguage)
+	d.Set("content_type", resp.ContentType)
+
 	log.Printf("[DEBUG] Reading S3 Bucket Object meta: %s", resp)
 	return nil
 }
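
The rewritten Put path works because both body sources satisfy io.ReadSeeker: *os.File for the "source" attribute and *bytes.Reader for "content". A small sketch of that interface trick, with a hypothetical bodyFrom helper standing in for the schema lookups:

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "os"
    )

    func bodyFrom(source, content string) (io.ReadSeeker, error) {
        if source != "" {
            return os.Open(source) // *os.File implements io.ReadSeeker
        }
        if content != "" {
            return bytes.NewReader([]byte(content)), nil // so does *bytes.Reader
        }
        return nil, fmt.Errorf("must specify source or content")
    }

    func main() {
        body, err := bodyFrom("", "hello")
        if err != nil {
            panic(err)
        }
        b, _ := io.ReadAll(body)
        fmt.Printf("%s\n", b)
    }

Whichever branch runs, a single PutObjectInput carries the result, which is why the resource no longer needs two upload code paths.
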
@@ -15,7 +15,7 @@ import (

 var tf, err = ioutil.TempFile("", "tf")

-func TestAccAWSS3BucketObject_basic(t *testing.T) {
+func TestAccAWSS3BucketObject_source(t *testing.T) {
 	// first write some data to the tempfile just so it's not 0 bytes.
 	ioutil.WriteFile(tf.Name(), []byte("{anything will do }"), 0644)
 	resource.Test(t, resource.TestCase{

@@ -29,13 +29,57 @@ func TestAccAWSS3BucketObject_basic(t *testing.T) {
 		CheckDestroy: testAccCheckAWSS3BucketObjectDestroy,
 		Steps: []resource.TestStep{
 			resource.TestStep{
-				Config: testAccAWSS3BucketObjectConfig,
+				Config: testAccAWSS3BucketObjectConfigSource,
 				Check:  testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object"),
 			},
 		},
 	})
 }

+func TestAccAWSS3BucketObject_content(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck: func() {
+			if err != nil {
+				panic(err)
+			}
+			testAccPreCheck(t)
+		},
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSS3BucketObjectDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccAWSS3BucketObjectConfigContent,
+				Check:  testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object"),
+			},
+		},
+	})
+}
+
+func TestAccAWSS3BucketObject_withContentCharacteristics(t *testing.T) {
+	// first write some data to the tempfile just so it's not 0 bytes.
+	ioutil.WriteFile(tf.Name(), []byte("{anything will do }"), 0644)
+	resource.Test(t, resource.TestCase{
+		PreCheck: func() {
+			if err != nil {
+				panic(err)
+			}
+			testAccPreCheck(t)
+		},
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSS3BucketObjectDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccAWSS3BucketObjectConfig_withContentCharacteristics,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object"),
+					resource.TestCheckResourceAttr(
+						"aws_s3_bucket_object.object", "content_type", "binary/octet-stream"),
+				),
+			},
+		},
+	})
+}
+
 func testAccCheckAWSS3BucketObjectDestroy(s *terraform.State) error {
 	s3conn := testAccProvider.Meta().(*AWSClient).s3conn

@@ -86,14 +130,39 @@ func testAccCheckAWSS3BucketObjectExists(n string) resource.TestCheckFunc {
 }

 var randomBucket = randInt
-var testAccAWSS3BucketObjectConfig = fmt.Sprintf(`
+var testAccAWSS3BucketObjectConfigSource = fmt.Sprintf(`
 resource "aws_s3_bucket" "object_bucket" {
 	bucket = "tf-object-test-bucket-%d"
 }

 resource "aws_s3_bucket_object" "object" {
 	bucket = "${aws_s3_bucket.object_bucket.bucket}"
 	key = "test-key"
 	source = "%s"
+	content_type = "binary/octet-stream"
 }
 `, randomBucket, tf.Name())
+
+var testAccAWSS3BucketObjectConfig_withContentCharacteristics = fmt.Sprintf(`
+resource "aws_s3_bucket" "object_bucket_2" {
+	bucket = "tf-object-test-bucket-%d"
+}
+
+resource "aws_s3_bucket_object" "object" {
+	bucket = "${aws_s3_bucket.object_bucket_2.bucket}"
+	key = "test-key"
+	source = "%s"
+	content_language = "en"
+	content_type = "binary/octet-stream"
+}
+`, randomBucket, tf.Name())
+
+var testAccAWSS3BucketObjectConfigContent = fmt.Sprintf(`
+resource "aws_s3_bucket" "object_bucket" {
+	bucket = "tf-object-test-bucket-%d"
+}
+resource "aws_s3_bucket_object" "object" {
+	bucket = "${aws_s3_bucket.object_bucket.bucket}"
+	key = "test-key"
+	content = "some_bucket_content"
+}
+`, randomBucket)
@@ -64,7 +64,7 @@ func TestAccAWSS3Bucket_Policy(t *testing.T) {
 	})
 }

-func TestAccAWSS3Bucket_Website(t *testing.T) {
+func TestAccAWSS3Bucket_Website_Simple(t *testing.T) {
 	resource.Test(t, resource.TestCase{
 		PreCheck:  func() { testAccPreCheck(t) },
 		Providers: testAccProviders,
@@ -20,7 +20,7 @@ func resourceAwsSecurityGroupRule() *schema.Resource {
 		Read:   resourceAwsSecurityGroupRuleRead,
 		Delete: resourceAwsSecurityGroupRuleDelete,

-		SchemaVersion: 1,
+		SchemaVersion: 2,
 		MigrateState:  resourceAwsSecurityGroupRuleMigrateState,

 		Schema: map[string]*schema.Schema{

@@ -67,7 +67,7 @@ func resourceAwsSecurityGroupRule() *schema.Resource {
 				Optional: true,
 				ForceNew: true,
 				Computed: true,
-				ConflictsWith: []string{"cidr_blocks"},
+				ConflictsWith: []string{"cidr_blocks", "self"},
 			},

 			"self": &schema.Schema{

@@ -75,6 +75,7 @@ func resourceAwsSecurityGroupRule() *schema.Resource {
 				Optional: true,
 				Default:  false,
 				ForceNew: true,
+				ConflictsWith: []string{"cidr_blocks"},
 			},
 		},
 	}

@@ -142,7 +143,7 @@ information and instructions for recovery. Error message: %s`, awsErr.Message())
 			ruleType, autherr)
 	}

-	d.SetId(ipPermissionIDHash(ruleType, perm))
+	d.SetId(ipPermissionIDHash(sg_id, ruleType, perm))

 	return resourceAwsSecurityGroupRuleRead(d, meta)
 }

@@ -158,24 +159,69 @@ func resourceAwsSecurityGroupRuleRead(d *schema.ResourceData, meta interface{})
 	}

 	var rule *ec2.IpPermission
+	var rules []*ec2.IpPermission
 	ruleType := d.Get("type").(string)
-	var rl []*ec2.IpPermission
 	switch ruleType {
 	case "ingress":
-		rl = sg.IpPermissions
+		rules = sg.IpPermissions
 	default:
-		rl = sg.IpPermissionsEgress
+		rules = sg.IpPermissionsEgress
 	}

-	for _, r := range rl {
-		if d.Id() == ipPermissionIDHash(ruleType, r) {
-			rule = r
+	p := expandIPPerm(d, sg)
+
+	if len(rules) == 0 {
+		return fmt.Errorf(
+			"[WARN] No %s rules were found for Security Group (%s) looking for Security Group Rule (%s)",
+			ruleType, *sg.GroupName, d.Id())
+	}
+
+	for _, r := range rules {
+		if r.ToPort != nil && *p.ToPort != *r.ToPort {
+			continue
+		}
+
+		if r.FromPort != nil && *p.FromPort != *r.FromPort {
+			continue
+		}
+
+		if r.IpProtocol != nil && *p.IpProtocol != *r.IpProtocol {
+			continue
+		}
+
+		remaining := len(p.IpRanges)
+		for _, ip := range p.IpRanges {
+			for _, rip := range r.IpRanges {
+				if *ip.CidrIp == *rip.CidrIp {
+					remaining--
+				}
+			}
+		}
+
+		if remaining > 0 {
+			continue
+		}
+
+		remaining = len(p.UserIdGroupPairs)
+		for _, ip := range p.UserIdGroupPairs {
+			for _, rip := range r.UserIdGroupPairs {
+				if *ip.GroupId == *rip.GroupId {
+					remaining--
+				}
+			}
+		}
+
+		if remaining > 0 {
+			continue
+		}
+
+		log.Printf("[DEBUG] Found rule for Security Group Rule (%s): %s", d.Id(), r)
+		rule = r
 	}

 	if rule == nil {
-		log.Printf("[DEBUG] Unable to find matching %s Security Group Rule for Group %s",
-			ruleType, sg_id)
+		log.Printf("[DEBUG] Unable to find matching %s Security Group Rule (%s) for Group %s",
+			ruleType, d.Id(), sg_id)
 		d.SetId("")
 		return nil
 	}

@@ -186,14 +232,14 @@ func resourceAwsSecurityGroupRuleRead(d *schema.ResourceData, meta interface{})
 	d.Set("type", ruleType)

 	var cb []string
-	for _, c := range rule.IpRanges {
+	for _, c := range p.IpRanges {
 		cb = append(cb, *c.CidrIp)
 	}

 	d.Set("cidr_blocks", cb)

-	if len(rule.UserIdGroupPairs) > 0 {
-		s := rule.UserIdGroupPairs[0]
+	if len(p.UserIdGroupPairs) > 0 {
+		s := p.UserIdGroupPairs[0]
 		d.Set("source_security_group_id", *s.GroupId)
 	}

@@ -285,8 +331,9 @@ func (b ByGroupPair) Less(i, j int) bool {
 	panic("mismatched security group rules, may be a terraform bug")
 }

-func ipPermissionIDHash(ruleType string, ip *ec2.IpPermission) string {
+func ipPermissionIDHash(sg_id, ruleType string, ip *ec2.IpPermission) string {
 	var buf bytes.Buffer
+	buf.WriteString(fmt.Sprintf("%s-", sg_id))
 	if ip.FromPort != nil && *ip.FromPort > 0 {
 		buf.WriteString(fmt.Sprintf("%d-", *ip.FromPort))
 	}

@@ -326,7 +373,7 @@ func ipPermissionIDHash(ruleType string, ip *ec2.IpPermission) string {
 		}
 	}

-	return fmt.Sprintf("sg-%d", hashcode.String(buf.String()))
+	return fmt.Sprintf("sgrule-%d", hashcode.String(buf.String()))
 }

 func expandIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup) *ec2.IpPermission {
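
The new Read logic above matches rules with a counting subset check instead of an exact hash comparison: start from the number of wanted CIDR blocks (or group pairs), decrement on every hit, and accept the rule only when nothing is left over. The same idiom reduced to plain strings, as a standalone sketch:

    package main

    import "fmt"

    // coversAll reports whether every entry in wanted also appears in have,
    // using the same decrement-a-counter idiom as the rule matcher above.
    func coversAll(wanted, have []string) bool {
        remaining := len(wanted)
        for _, w := range wanted {
            for _, h := range have {
                if w == h {
                    remaining--
                }
            }
        }
        return remaining <= 0
    }

    func main() {
        fmt.Println(coversAll(
            []string{"10.0.2.0/24", "10.0.3.0/24"},
            []string{"10.0.2.0/24", "10.0.3.0/24", "10.0.4.0/24"})) // true
        fmt.Println(coversAll(
            []string{"10.0.5.0/24"},
            []string{"10.0.2.0/24"})) // false
    }
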
@@ -17,6 +17,12 @@ func resourceAwsSecurityGroupRuleMigrateState(
 	case 0:
 		log.Println("[INFO] Found AWS Security Group State v0; migrating to v1")
 		return migrateSGRuleStateV0toV1(is)
+	case 1:
+		log.Println("[INFO] Found AWS Security Group State v1; migrating to v2")
+		// migrating to version 2 of the schema is the same as 0->1, since the
+		// method signature has changed now and will use the security group id in
+		// the hash
+		return migrateSGRuleStateV0toV1(is)
 	default:
 		return is, fmt.Errorf("Unexpected schema version: %d", v)
 	}

@@ -37,7 +43,7 @@ func migrateSGRuleStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceS
 	}

 	log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes)
-	newID := ipPermissionIDHash(is.Attributes["type"], perm)
+	newID := ipPermissionIDHash(is.Attributes["security_group_id"], is.Attributes["type"], perm)
 	is.Attributes["id"] = newID
 	is.ID = newID
 	log.Printf("[DEBUG] Attributes after migration: %#v, new id: %s", is.Attributes, newID)
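
The migration above works because the ID is a pure function of the rule's fields, so re-running the v0->v1 code with the new signature re-keys old state. A rough standalone sketch of the new sg_id-prefixed scheme; it assumes hashcode.String is essentially a crc32 checksum, and the field ordering here is illustrative rather than the provider's exact buffer layout:

    package main

    import (
        "fmt"
        "hash/crc32"
    )

    // ruleID prefixes the hash input with the owning security group id, so
    // identical rules on different groups get distinct IDs.
    func ruleID(sgID, ruleType string, fromPort, toPort int64, proto string) string {
        s := fmt.Sprintf("%s-%d-%d-%s-%s-", sgID, fromPort, toPort, proto, ruleType)
        return fmt.Sprintf("sgrule-%d", crc32.ChecksumIEEE([]byte(s)))
    }

    func main() {
        // same rule, different groups -> different IDs
        fmt.Println(ruleID("sg-aaa", "ingress", 80, 80, "tcp"))
        fmt.Println(ruleID("sg-bbb", "ingress", 80, 80, "tcp"))
    }

Two otherwise-identical rules on different groups now hash to different sgrule- IDs, which is the collision the SchemaVersion bump to 2 exists to fix.
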
@@ -27,7 +27,7 @@ func TestAWSSecurityGroupRuleMigrateState(t *testing.T) {
 			"from_port": "0",
 			"source_security_group_id": "sg-11877275",
 		},
-		Expected: "sg-3766347571",
+		Expected: "sgrule-2889201120",
 	},
 	"v0_2": {
 		StateVersion: 0,

@@ -44,7 +44,7 @@ func TestAWSSecurityGroupRuleMigrateState(t *testing.T) {
 			"cidr_blocks.2": "172.16.3.0/24",
 			"cidr_blocks.3": "172.16.4.0/24",
 			"cidr_blocks.#": "4"},
-		Expected: "sg-4100229787",
+		Expected: "sgrule-1826358977",
 	},
 }
|
@ -2,7 +2,7 @@ package aws
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"reflect"
|
"log"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
@ -90,15 +90,15 @@ func TestIpPermissionIDHash(t *testing.T) {
|
||||||
Type string
|
Type string
|
||||||
Output string
|
Output string
|
||||||
}{
|
}{
|
||||||
{simple, "ingress", "sg-82613597"},
|
{simple, "ingress", "sgrule-3403497314"},
|
||||||
{egress, "egress", "sg-363054720"},
|
{egress, "egress", "sgrule-1173186295"},
|
||||||
{egress_all, "egress", "sg-2766285362"},
|
{egress_all, "egress", "sgrule-766323498"},
|
||||||
{vpc_security_group_source, "egress", "sg-2661404947"},
|
{vpc_security_group_source, "egress", "sgrule-351225364"},
|
||||||
{security_group_source, "egress", "sg-1841245863"},
|
{security_group_source, "egress", "sgrule-2198807188"},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range cases {
|
for _, tc := range cases {
|
||||||
actual := ipPermissionIDHash(tc.Type, tc.Input)
|
actual := ipPermissionIDHash("sg-12345", tc.Type, tc.Input)
|
||||||
if actual != tc.Output {
|
if actual != tc.Output {
|
||||||
t.Errorf("input: %s - %s\noutput: %s", tc.Type, tc.Input, actual)
|
t.Errorf("input: %s - %s\noutput: %s", tc.Type, tc.Input, actual)
|
||||||
}
|
}
|
||||||
|
@ -132,7 +132,7 @@ func TestAccAWSSecurityGroupRule_Ingress_VPC(t *testing.T) {
|
||||||
Config: testAccAWSSecurityGroupRuleIngressConfig,
|
Config: testAccAWSSecurityGroupRuleIngressConfig,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group),
|
testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group),
|
||||||
testAccCheckAWSSecurityGroupRuleAttributes(&group, "ingress"),
|
testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress_1", &group, nil, "ingress"),
|
||||||
resource.TestCheckResourceAttr(
|
resource.TestCheckResourceAttr(
|
||||||
"aws_security_group_rule.ingress_1", "from_port", "80"),
|
"aws_security_group_rule.ingress_1", "from_port", "80"),
|
||||||
testRuleCount,
|
testRuleCount,
|
||||||
|
@ -169,7 +169,7 @@ func TestAccAWSSecurityGroupRule_Ingress_Classic(t *testing.T) {
|
||||||
Config: testAccAWSSecurityGroupRuleIngressClassicConfig,
|
Config: testAccAWSSecurityGroupRuleIngressClassicConfig,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group),
|
testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group),
|
||||||
testAccCheckAWSSecurityGroupRuleAttributes(&group, "ingress"),
|
testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress_1", &group, nil, "ingress"),
|
||||||
resource.TestCheckResourceAttr(
|
resource.TestCheckResourceAttr(
|
||||||
"aws_security_group_rule.ingress_1", "from_port", "80"),
|
"aws_security_group_rule.ingress_1", "from_port", "80"),
|
||||||
testRuleCount,
|
testRuleCount,
|
||||||
|
@ -231,7 +231,7 @@ func TestAccAWSSecurityGroupRule_Egress(t *testing.T) {
|
||||||
Config: testAccAWSSecurityGroupRuleEgressConfig,
|
Config: testAccAWSSecurityGroupRuleEgressConfig,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group),
|
testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group),
|
||||||
testAccCheckAWSSecurityGroupRuleAttributes(&group, "egress"),
|
testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.egress_1", &group, nil, "egress"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -256,6 +256,92 @@ func TestAccAWSSecurityGroupRule_SelfReference(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// testing partial match implementation
|
||||||
|
func TestAccAWSSecurityGroupRule_PartialMatching_basic(t *testing.T) {
|
||||||
|
var group ec2.SecurityGroup
|
||||||
|
|
||||||
|
p := ec2.IpPermission{
|
||||||
|
FromPort: aws.Int64(80),
|
||||||
|
ToPort: aws.Int64(80),
|
||||||
|
IpProtocol: aws.String("tcp"),
|
||||||
|
IpRanges: []*ec2.IpRange{
|
||||||
|
&ec2.IpRange{CidrIp: aws.String("10.0.2.0/24")},
|
||||||
|
&ec2.IpRange{CidrIp: aws.String("10.0.3.0/24")},
|
||||||
|
&ec2.IpRange{CidrIp: aws.String("10.0.4.0/24")},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
o := ec2.IpPermission{
|
||||||
|
FromPort: aws.Int64(80),
|
||||||
|
ToPort: aws.Int64(80),
|
||||||
|
IpProtocol: aws.String("tcp"),
|
||||||
|
IpRanges: []*ec2.IpRange{
|
||||||
|
&ec2.IpRange{CidrIp: aws.String("10.0.5.0/24")},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccAWSSecurityGroupRulePartialMatching,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group),
|
||||||
|
testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress", &group, &p, "ingress"),
|
||||||
|
testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.other", &group, &o, "ingress"),
|
||||||
|
testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.nat_ingress", &group, &o, "ingress"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccAWSSecurityGroupRule_PartialMatching_Source(t *testing.T) {
|
||||||
|
var group ec2.SecurityGroup
|
||||||
|
var nat ec2.SecurityGroup
|
||||||
|
var p ec2.IpPermission
|
||||||
|
|
||||||
|
// This function creates the expected IPPermission with the group id from an
|
||||||
|
// external security group, needed because Security Group IDs are generated on
|
||||||
|
// AWS side and can't be known ahead of time.
|
||||||
|
setupSG := func(*terraform.State) error {
|
||||||
|
if nat.GroupId == nil {
|
||||||
|
return fmt.Errorf("Error: nat group has nil GroupID")
|
||||||
|
}
|
||||||
|
|
||||||
|
p = ec2.IpPermission{
|
||||||
|
FromPort: aws.Int64(80),
|
||||||
|
ToPort: aws.Int64(80),
|
||||||
|
IpProtocol: aws.String("tcp"),
|
||||||
|
UserIdGroupPairs: []*ec2.UserIdGroupPair{
|
||||||
|
&ec2.UserIdGroupPair{GroupId: nat.GroupId},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccAWSSecurityGroupRulePartialMatching_Source,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group),
|
||||||
|
testAccCheckAWSSecurityGroupRuleExists("aws_security_group.nat", &nat),
|
||||||
|
setupSG,
|
||||||
|
testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.source_ingress", &group, &p, "ingress"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckAWSSecurityGroupRuleDestroy(s *terraform.State) error {
|
func testAccCheckAWSSecurityGroupRuleDestroy(s *terraform.State) error {
|
||||||
conn := testAccProvider.Meta().(*AWSClient).ec2conn
|
conn := testAccProvider.Meta().(*AWSClient).ec2conn
|
||||||
|
|
||||||
|
@ -319,14 +405,27 @@ func testAccCheckAWSSecurityGroupRuleExists(n string, group *ec2.SecurityGroup)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckAWSSecurityGroupRuleAttributes(group *ec2.SecurityGroup, ruleType string) resource.TestCheckFunc {
|
func testAccCheckAWSSecurityGroupRuleAttributes(n string, group *ec2.SecurityGroup, p *ec2.IpPermission, ruleType string) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
p := &ec2.IpPermission{
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Security Group Rule Not found: %s", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return fmt.Errorf("No Security Group Rule is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
if p == nil {
|
||||||
|
p = &ec2.IpPermission{
|
||||||
FromPort: aws.Int64(80),
|
FromPort: aws.Int64(80),
|
||||||
ToPort: aws.Int64(8000),
|
ToPort: aws.Int64(8000),
|
||||||
IpProtocol: aws.String("tcp"),
|
IpProtocol: aws.String("tcp"),
|
||||||
IpRanges: []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("10.0.0.0/8")}},
|
IpRanges: []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("10.0.0.0/8")}},
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var matchingRule *ec2.IpPermission
|
||||||
var rules []*ec2.IpPermission
|
var rules []*ec2.IpPermission
|
||||||
if ruleType == "ingress" {
|
if ruleType == "ingress" {
|
||||||
rules = group.IpPermissions
|
rules = group.IpPermissions
|
||||||
|
@ -338,16 +437,54 @@ func testAccCheckAWSSecurityGroupRuleAttributes(group *ec2.SecurityGroup, ruleTy
|
||||||
return fmt.Errorf("No IPPerms")
|
return fmt.Errorf("No IPPerms")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Compare our ingress
|
for _, r := range rules {
|
||||||
if !reflect.DeepEqual(rules[0], p) {
|
if r.ToPort != nil && *p.ToPort != *r.ToPort {
|
||||||
return fmt.Errorf(
|
continue
|
||||||
"Got:\n\n%#v\n\nExpected:\n\n%#v\n",
|
|
||||||
rules[0],
|
|
||||||
p)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if r.FromPort != nil && *p.FromPort != *r.FromPort {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.IpProtocol != nil && *p.IpProtocol != *r.IpProtocol {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
remaining := len(p.IpRanges)
|
||||||
|
for _, ip := range p.IpRanges {
|
||||||
|
for _, rip := range r.IpRanges {
|
||||||
|
if *ip.CidrIp == *rip.CidrIp {
|
||||||
|
remaining--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if remaining > 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
remaining = len(p.UserIdGroupPairs)
|
||||||
|
for _, ip := range p.UserIdGroupPairs {
|
||||||
|
for _, rip := range r.UserIdGroupPairs {
|
||||||
|
if *ip.GroupId == *rip.GroupId {
|
||||||
|
remaining--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if remaining > 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
matchingRule = r
|
||||||
|
}
|
||||||
|
|
||||||
|
if matchingRule != nil {
|
||||||
|
log.Printf("[DEBUG] Matching rule found : %s", matchingRule)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("Error here\n\tlooking for %s, wasn't found in %s", p, rules)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const testAccAWSSecurityGroupRuleIngressConfig = `
|
const testAccAWSSecurityGroupRuleIngressConfig = `
|
||||||
|
@ -480,3 +617,104 @@ resource "aws_security_group_rule" "self" {
|
||||||
security_group_id = "${aws_security_group.web.id}"
|
security_group_id = "${aws_security_group.web.id}"
|
||||||
}
|
}
|
||||||
`
|
`
|
||||||
|
|
||||||
|
const testAccAWSSecurityGroupRulePartialMatching = `
|
||||||
|
resource "aws_vpc" "default" {
|
||||||
|
cidr_block = "10.0.0.0/16"
|
||||||
|
tags {
|
||||||
|
Name = "tf-sg-rule-bug"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "aws_security_group" "web" {
|
||||||
|
name = "tf-other"
|
||||||
|
vpc_id = "${aws_vpc.default.id}"
|
||||||
|
tags {
|
||||||
|
Name = "tf-other-sg"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "aws_security_group" "nat" {
|
||||||
|
name = "tf-nat"
|
||||||
|
vpc_id = "${aws_vpc.default.id}"
|
||||||
|
tags {
|
||||||
|
Name = "tf-nat-sg"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "aws_security_group_rule" "ingress" {
|
||||||
|
type = "ingress"
|
||||||
|
from_port = 80
|
||||||
|
to_port = 80
|
||||||
|
protocol = "tcp"
|
||||||
|
cidr_blocks = ["10.0.2.0/24", "10.0.3.0/24", "10.0.4.0/24"]
|
||||||
|
|
||||||
|
security_group_id = "${aws_security_group.web.id}"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "aws_security_group_rule" "other" {
|
||||||
|
type = "ingress"
|
||||||
|
from_port = 80
|
||||||
|
to_port = 80
|
||||||
|
protocol = "tcp"
|
||||||
|
cidr_blocks = ["10.0.5.0/24"]
|
||||||
|
|
||||||
|
security_group_id = "${aws_security_group.web.id}"
|
||||||
|
}
|
||||||
|
|
||||||
|
// same a above, but different group, to guard against bad hashing
|
||||||
|
resource "aws_security_group_rule" "nat_ingress" {
|
||||||
|
type = "ingress"
|
||||||
|
from_port = 80
|
||||||
|
to_port = 80
|
||||||
|
protocol = "tcp"
|
||||||
|
cidr_blocks = ["10.0.2.0/24", "10.0.3.0/24", "10.0.4.0/24"]
|
||||||
|
|
||||||
|
security_group_id = "${aws_security_group.nat.id}"
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
const testAccAWSSecurityGroupRulePartialMatching_Source = `
|
||||||
|
resource "aws_vpc" "default" {
|
||||||
|
cidr_block = "10.0.0.0/16"
|
||||||
|
tags {
|
||||||
|
Name = "tf-sg-rule-bug"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "aws_security_group" "web" {
|
||||||
|
name = "tf-other"
|
||||||
|
vpc_id = "${aws_vpc.default.id}"
|
||||||
|
tags {
|
||||||
|
Name = "tf-other-sg"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "aws_security_group" "nat" {
|
||||||
|
name = "tf-nat"
|
||||||
|
vpc_id = "${aws_vpc.default.id}"
|
||||||
|
tags {
|
||||||
|
Name = "tf-nat-sg"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "aws_security_group_rule" "source_ingress" {
|
||||||
|
type = "ingress"
|
||||||
|
from_port = 80
|
||||||
|
to_port = 80
|
||||||
|
protocol = "tcp"
|
||||||
|
|
||||||
|
source_security_group_id = "${aws_security_group.nat.id}"
|
||||||
|
security_group_id = "${aws_security_group.web.id}"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "aws_security_group_rule" "other_ingress" {
|
||||||
|
type = "ingress"
|
||||||
|
from_port = 80
|
||||||
|
to_port = 80
|
||||||
|
protocol = "tcp"
|
||||||
|
cidr_blocks = ["10.0.2.0/24", "10.0.3.0/24", "10.0.4.0/24"]
|
||||||
|
|
||||||
|
security_group_id = "${aws_security_group.web.id}"
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
|
@@ -36,6 +36,11 @@ func resourceAwsSpotInstanceRequest() *schema.Resource {
 		Required: true,
 		ForceNew: true,
 	}
+	s["spot_type"] = &schema.Schema{
+		Type:     schema.TypeString,
+		Optional: true,
+		Default:  "persistent",
+	}
 	s["wait_for_fulfillment"] = &schema.Schema{
 		Type:     schema.TypeBool,
 		Optional: true,

@@ -69,10 +74,7 @@ func resourceAwsSpotInstanceRequestCreate(d *schema.ResourceData, meta interface
 	spotOpts := &ec2.RequestSpotInstancesInput{
 		SpotPrice: aws.String(d.Get("spot_price").(string)),
-
-		// We always set the type to "persistent", since the imperative-like
-		// behavior of "one-time" does not map well to TF's declarative domain.
-		Type: aws.String("persistent"),
+		Type:      aws.String(d.Get("spot_type").(string)),

 		// Though the AWS API supports creating spot instance requests for multiple
 		// instances, for TF purposes we fix this to one instance per request.
@@ -127,6 +127,9 @@ func resourceVPCPeeringConnectionAccept(conn *ec2.EC2, id string) (string, error
 	}

 	resp, err := conn.AcceptVpcPeeringConnection(req)
+	if err != nil {
+		return "", err
+	}
 	pc := resp.VpcPeeringConnection
 	return *pc.Status.Code, err
 }

@@ -153,16 +156,15 @@ func resourceAwsVPCPeeringUpdate(d *schema.ResourceData, meta interface{}) error
 	}
 	pc := pcRaw.(*ec2.VpcPeeringConnection)

-	if *pc.Status.Code == "pending-acceptance" {
+	if pc.Status != nil && *pc.Status.Code == "pending-acceptance" {
 		status, err := resourceVPCPeeringConnectionAccept(conn, d.Id())
-
-		log.Printf(
-			"[DEBUG] VPC Peering connection accept status %s",
-			status)
 		if err != nil {
 			return err
 		}
+		log.Printf(
+			"[DEBUG] VPC Peering connection accept status: %s",
+			status)
 	}
 }
@@ -36,6 +36,7 @@ func TestAccAWSVPCPeeringConnection_basic(t *testing.T) {

 func TestAccAWSVPCPeeringConnection_tags(t *testing.T) {
 	var connection ec2.VpcPeeringConnection
+	peerId := os.Getenv("TF_PEER_ID")

 	resource.Test(t, resource.TestCase{
 		PreCheck: func() { testAccPreCheck(t) },

@@ -43,7 +44,7 @@ func TestAccAWSVPCPeeringConnection_tags(t *testing.T) {
 		CheckDestroy: testAccCheckVpcDestroy,
 		Steps: []resource.TestStep{
 			resource.TestStep{
-				Config: testAccVpcPeeringConfigTags,
+				Config: fmt.Sprintf(testAccVpcPeeringConfigTags, peerId),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSVpcPeeringConnectionExists("aws_vpc_peering_connection.foo", &connection),
 					testAccCheckTags(&connection.Tags, "foo", "bar"),

@@ -117,6 +118,7 @@ resource "aws_vpc" "bar" {
 resource "aws_vpc_peering_connection" "foo" {
 	vpc_id = "${aws_vpc.foo.id}"
 	peer_vpc_id = "${aws_vpc.bar.id}"
+	auto_accept = true
 }
 `

@@ -132,6 +134,7 @@ resource "aws_vpc" "bar" {
 resource "aws_vpc_peering_connection" "foo" {
 	vpc_id = "${aws_vpc.foo.id}"
 	peer_vpc_id = "${aws_vpc.bar.id}"
+	peer_owner_id = "%s"
 	tags {
 		foo = "bar"
 	}
@@ -17,8 +17,6 @@ func resourceAwsVpnConnectionRoute() *schema.Resource {
 		// You can't update a route. You can just delete one and make
 		// a new one.
 		Create: resourceAwsVpnConnectionRouteCreate,
-		Update: resourceAwsVpnConnectionRouteCreate,
-
 		Read:   resourceAwsVpnConnectionRouteRead,
 		Delete: resourceAwsVpnConnectionRouteDelete,
@ -4,13 +4,16 @@ import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"regexp"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/service/directoryservice"
|
||||||
"github.com/aws/aws-sdk-go/service/ec2"
|
"github.com/aws/aws-sdk-go/service/ec2"
|
||||||
"github.com/aws/aws-sdk-go/service/ecs"
|
"github.com/aws/aws-sdk-go/service/ecs"
|
||||||
"github.com/aws/aws-sdk-go/service/elasticache"
|
"github.com/aws/aws-sdk-go/service/elasticache"
|
||||||
|
elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice"
|
||||||
"github.com/aws/aws-sdk-go/service/elb"
|
"github.com/aws/aws-sdk-go/service/elb"
|
||||||
"github.com/aws/aws-sdk-go/service/rds"
|
"github.com/aws/aws-sdk-go/service/rds"
|
||||||
"github.com/aws/aws-sdk-go/service/route53"
|
"github.com/aws/aws-sdk-go/service/route53"
|
||||||
|
@@ -368,7 +371,7 @@ func flattenElastiCacheParameters(list []*elasticache.Parameter) []map[string]interface{} {
 }
 
 // Takes the result of flatmap.Expand for an array of strings
-// and returns a []string
+// and returns a []*string
 func expandStringList(configured []interface{}) []*string {
 	vs := make([]*string, 0, len(configured))
 	for _, v := range configured {
@@ -377,6 +380,17 @@ func expandStringList(configured []interface{}) []*string {
 	return vs
 }
 
+// Takes a list of pointers to strings. Expands into an array
+// of raw strings and returns a []interface{} to keep
+// compatibility with schema.NewSet.
+func flattenStringList(list []*string) []interface{} {
+	vs := make([]interface{}, 0, len(list))
+	for _, v := range list {
+		vs = append(vs, *v)
+	}
+	return vs
+}
+
 // Flattens an array of private ip addresses into a []string, where the elements returned are the IP strings e.g. "192.168.0.0"
 func flattenNetworkInterfacesPrivateIPAddresses(dtos []*ec2.NetworkInterfacePrivateIpAddress) []string {
 	ips := make([]string, 0, len(dtos))
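
A minimal usage sketch, not part of the diff, showing why flattenStringList returns []interface{}: that is the element slice schema.NewSet accepts, as flattenDSVpcSettings in the next hunk demonstrates:

    ids := []*string{aws.String("subnet-12345678"), aws.String("subnet-87654321")}
    // schema.HashString hashes each element; the set drops ordering and duplicates.
    set := schema.NewSet(schema.HashString, flattenStringList(ids))
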
@@ -446,3 +460,144 @@ func expandResourceRecords(recs []interface{}, typeStr string) []*route53.ResourceRecord {
 	}
 	return records
 }
+
+func validateRdsId(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"only lowercase alphanumeric characters and hyphens allowed in %q", k))
+	}
+	if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"first character of %q must be a letter", k))
+	}
+	if regexp.MustCompile(`--`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot contain two consecutive hyphens", k))
+	}
+	if regexp.MustCompile(`-$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot end with a hyphen", k))
+	}
+	return
+}
+
+func expandESClusterConfig(m map[string]interface{}) *elasticsearch.ElasticsearchClusterConfig {
+	config := elasticsearch.ElasticsearchClusterConfig{}
+
+	if v, ok := m["dedicated_master_enabled"]; ok {
+		isEnabled := v.(bool)
+		config.DedicatedMasterEnabled = aws.Bool(isEnabled)
+
+		if isEnabled {
+			if v, ok := m["dedicated_master_count"]; ok && v.(int) > 0 {
+				config.DedicatedMasterCount = aws.Int64(int64(v.(int)))
+			}
+			if v, ok := m["dedicated_master_type"]; ok && v.(string) != "" {
+				config.DedicatedMasterType = aws.String(v.(string))
+			}
+		}
+	}
+
+	if v, ok := m["instance_count"]; ok {
+		config.InstanceCount = aws.Int64(int64(v.(int)))
+	}
+	if v, ok := m["instance_type"]; ok {
+		config.InstanceType = aws.String(v.(string))
+	}
+
+	if v, ok := m["zone_awareness_enabled"]; ok {
+		config.ZoneAwarenessEnabled = aws.Bool(v.(bool))
+	}
+
+	return &config
+}
+
+func flattenESClusterConfig(c *elasticsearch.ElasticsearchClusterConfig) []map[string]interface{} {
+	m := map[string]interface{}{}
+
+	if c.DedicatedMasterCount != nil {
+		m["dedicated_master_count"] = *c.DedicatedMasterCount
+	}
+	if c.DedicatedMasterEnabled != nil {
+		m["dedicated_master_enabled"] = *c.DedicatedMasterEnabled
+	}
+	if c.DedicatedMasterType != nil {
+		m["dedicated_master_type"] = *c.DedicatedMasterType
+	}
+	if c.InstanceCount != nil {
+		m["instance_count"] = *c.InstanceCount
+	}
+	if c.InstanceType != nil {
+		m["instance_type"] = *c.InstanceType
+	}
+	if c.ZoneAwarenessEnabled != nil {
+		m["zone_awareness_enabled"] = *c.ZoneAwarenessEnabled
+	}
+
+	return []map[string]interface{}{m}
+}
+
+func flattenESEBSOptions(o *elasticsearch.EBSOptions) []map[string]interface{} {
+	m := map[string]interface{}{}
+
+	if o.EBSEnabled != nil {
+		m["ebs_enabled"] = *o.EBSEnabled
+	}
+	if o.Iops != nil {
+		m["iops"] = *o.Iops
+	}
+	if o.VolumeSize != nil {
+		m["volume_size"] = *o.VolumeSize
+	}
+	if o.VolumeType != nil {
+		m["volume_type"] = *o.VolumeType
+	}
+
+	return []map[string]interface{}{m}
+}
+
+func expandESEBSOptions(m map[string]interface{}) *elasticsearch.EBSOptions {
+	options := elasticsearch.EBSOptions{}
+
+	if v, ok := m["ebs_enabled"]; ok {
+		options.EBSEnabled = aws.Bool(v.(bool))
+	}
+	if v, ok := m["iops"]; ok && v.(int) > 0 {
+		options.Iops = aws.Int64(int64(v.(int)))
+	}
+	if v, ok := m["volume_size"]; ok && v.(int) > 0 {
+		options.VolumeSize = aws.Int64(int64(v.(int)))
+	}
+	if v, ok := m["volume_type"]; ok && v.(string) != "" {
+		options.VolumeType = aws.String(v.(string))
+	}
+
+	return &options
+}
+
+func pointersMapToStringList(pointers map[string]*string) map[string]interface{} {
+	list := make(map[string]interface{}, len(pointers))
+	for i, v := range pointers {
+		list[i] = *v
+	}
+	return list
+}
+
+func stringMapToPointers(m map[string]interface{}) map[string]*string {
+	list := make(map[string]*string, len(m))
+	for i, v := range m {
+		list[i] = aws.String(v.(string))
+	}
+	return list
+}
+
+func flattenDSVpcSettings(
+	s *directoryservice.DirectoryVpcSettingsDescription) []map[string]interface{} {
+	settings := make(map[string]interface{}, 0)
+
+	settings["subnet_ids"] = schema.NewSet(schema.HashString, flattenStringList(s.SubnetIds))
+	settings["vpc_id"] = *s.VpcId
+
+	return []map[string]interface{}{settings}
+}
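
A quick sketch, not part of the diff, of what validateRdsId accepts; the rules above mirror the RDS identifier constraints:

    // "my-db-1" -> ok: no warnings, no errors
    // "My-DB"   -> rejected: uppercase characters
    // "1db"     -> rejected: first character must be a letter
    // "db--1"   -> rejected: two consecutive hyphens
    // "mydb-"   -> rejected: trailing hyphen
    ws, es := validateRdsId("my-db-1", "identifier")
    fmt.Printf("warnings: %v errors: %v\n", ws, es)
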
@ -0,0 +1,94 @@
|
||||||
|
package aws
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/service/efs"
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
// setTags is a helper to set the tags for a resource. It expects the
|
||||||
|
// tags field to be named "tags"
|
||||||
|
func setTagsEFS(conn *efs.EFS, d *schema.ResourceData) error {
|
||||||
|
if d.HasChange("tags") {
|
||||||
|
oraw, nraw := d.GetChange("tags")
|
||||||
|
o := oraw.(map[string]interface{})
|
||||||
|
n := nraw.(map[string]interface{})
|
||||||
|
create, remove := diffTagsEFS(tagsFromMapEFS(o), tagsFromMapEFS(n))
|
||||||
|
|
||||||
|
// Set tags
|
||||||
|
if len(remove) > 0 {
|
||||||
|
log.Printf("[DEBUG] Removing tags: %#v", remove)
|
||||||
|
k := make([]*string, 0, len(remove))
|
||||||
|
for _, t := range remove {
|
||||||
|
k = append(k, t.Key)
|
||||||
|
}
|
||||||
|
_, err := conn.DeleteTags(&efs.DeleteTagsInput{
|
||||||
|
FileSystemId: aws.String(d.Id()),
|
||||||
|
TagKeys: k,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(create) > 0 {
|
||||||
|
log.Printf("[DEBUG] Creating tags: %#v", create)
|
||||||
|
_, err := conn.CreateTags(&efs.CreateTagsInput{
|
||||||
|
FileSystemId: aws.String(d.Id()),
|
||||||
|
Tags: create,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// diffTags takes our tags locally and the ones remotely and returns
|
||||||
|
// the set of tags that must be created, and the set of tags that must
|
||||||
|
// be destroyed.
|
||||||
|
func diffTagsEFS(oldTags, newTags []*efs.Tag) ([]*efs.Tag, []*efs.Tag) {
|
||||||
|
// First, we're creating everything we have
|
||||||
|
create := make(map[string]interface{})
|
||||||
|
for _, t := range newTags {
|
||||||
|
create[*t.Key] = *t.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build the list of what to remove
|
||||||
|
var remove []*efs.Tag
|
||||||
|
for _, t := range oldTags {
|
||||||
|
old, ok := create[*t.Key]
|
||||||
|
if !ok || old != *t.Value {
|
||||||
|
// Delete it!
|
||||||
|
remove = append(remove, t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return tagsFromMapEFS(create), remove
|
||||||
|
}
|
||||||
|
|
||||||
|
// tagsFromMap returns the tags for the given map of data.
|
||||||
|
func tagsFromMapEFS(m map[string]interface{}) []*efs.Tag {
|
||||||
|
var result []*efs.Tag
|
||||||
|
for k, v := range m {
|
||||||
|
result = append(result, &efs.Tag{
|
||||||
|
Key: aws.String(k),
|
||||||
|
Value: aws.String(v.(string)),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// tagsToMap turns the list of tags into a map.
|
||||||
|
func tagsToMapEFS(ts []*efs.Tag) map[string]string {
|
||||||
|
result := make(map[string]string)
|
||||||
|
for _, t := range ts {
|
||||||
|
result[*t.Key] = *t.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
|
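
A hedged sketch, not part of the diff, of where setTagsEFS is meant to be called; the resource function and client field names here are assumptions:

    // Inside a hypothetical resourceAwsEfsFileSystemUpdate:
    conn := meta.(*AWSClient).efsconn // assumed client field on AWSClient
    if err := setTagsEFS(conn, d); err != nil {
        return err
    }
    d.SetPartial("tags")
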
@@ -0,0 +1,85 @@
+package aws
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/service/efs"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+func TestDiffEFSTags(t *testing.T) {
+	cases := []struct {
+		Old, New       map[string]interface{}
+		Create, Remove map[string]string
+	}{
+		// Basic add/remove
+		{
+			Old: map[string]interface{}{
+				"foo": "bar",
+			},
+			New: map[string]interface{}{
+				"bar": "baz",
+			},
+			Create: map[string]string{
+				"bar": "baz",
+			},
+			Remove: map[string]string{
+				"foo": "bar",
+			},
+		},
+
+		// Modify
+		{
+			Old: map[string]interface{}{
+				"foo": "bar",
+			},
+			New: map[string]interface{}{
+				"foo": "baz",
+			},
+			Create: map[string]string{
+				"foo": "baz",
+			},
+			Remove: map[string]string{
+				"foo": "bar",
+			},
+		},
+	}
+
+	for i, tc := range cases {
+		c, r := diffTagsEFS(tagsFromMapEFS(tc.Old), tagsFromMapEFS(tc.New))
+		cm := tagsToMapEFS(c)
+		rm := tagsToMapEFS(r)
+		if !reflect.DeepEqual(cm, tc.Create) {
+			t.Fatalf("%d: bad create: %#v", i, cm)
+		}
+		if !reflect.DeepEqual(rm, tc.Remove) {
+			t.Fatalf("%d: bad remove: %#v", i, rm)
+		}
+	}
+}
+
+// testAccCheckEFSTags can be used to check the tags on a resource.
+func testAccCheckEFSTags(
+	ts *[]*efs.Tag, key string, value string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		m := tagsToMapEFS(*ts)
+		v, ok := m[key]
+		if value != "" && !ok {
+			return fmt.Errorf("Missing tag: %s", key)
+		} else if value == "" && ok {
+			return fmt.Errorf("Extra tag: %s", key)
+		}
+		if value == "" {
+			return nil
+		}
+
+		if v != value {
+			return fmt.Errorf("%s: bad value: %s", key, v)
+		}
+
+		return nil
+	}
+}
@@ -1,6 +1,7 @@
 package aws
 
 import (
+	"fmt"
 	"log"
 
 	"github.com/aws/aws-sdk-go/aws"
@@ -19,7 +20,7 @@ func setTagsRDS(conn *rds.RDS, d *schema.ResourceData, arn string) error {
 
 		// Set tags
 		if len(remove) > 0 {
-			log.Printf("[DEBUG] Removing tags: %#v", remove)
+			log.Printf("[DEBUG] Removing tags: %s", remove)
 			k := make([]*string, len(remove), len(remove))
 			for i, t := range remove {
 				k[i] = t.Key
@@ -34,7 +35,7 @@ func setTagsRDS(conn *rds.RDS, d *schema.ResourceData, arn string) error {
 			}
 		}
 		if len(create) > 0 {
-			log.Printf("[DEBUG] Creating tags: %#v", create)
+			log.Printf("[DEBUG] Creating tags: %s", create)
 			_, err := conn.AddTagsToResource(&rds.AddTagsToResourceInput{
 				ResourceName: aws.String(arn),
 				Tags:         create,
@@ -93,3 +94,20 @@ func tagsToMapRDS(ts []*rds.Tag) map[string]string {
 
 	return result
 }
+
+func saveTagsRDS(conn *rds.RDS, d *schema.ResourceData, arn string) error {
+	resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{
+		ResourceName: aws.String(arn),
+	})
+
+	if err != nil {
+		return fmt.Errorf("[DEBUG] Error retrieving tags for ARN: %s", arn)
+	}
+
+	var dt []*rds.Tag
+	if len(resp.TagList) > 0 {
+		dt = resp.TagList
+	}
+
+	return d.Set("tags", tagsToMapRDS(dt))
+}
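
saveTagsRDS is the read-side complement of setTagsRDS: it lists the remote tags and writes them into state. A sketch of the expected call site in a resource Read function; the ARN wiring is an assumption, not part of this diff:

    // Inside a hypothetical resourceAwsDbInstanceRead, after fetching the instance:
    arn := "arn:aws:rds:us-east-1:123456789012:db:mydb" // normally built from the API response
    if err := saveTagsRDS(conn, d, arn); err != nil {
        log.Printf("[DEBUG] Error setting tags for RDS resource %s: %s", d.Id(), err)
    }
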
@@ -0,0 +1,105 @@
+package aws
+
+import (
+	"log"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/kinesis"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+// setTagsKinesis is a helper to set the tags for a resource. It expects the
+// tags field to be named "tags".
+func setTagsKinesis(conn *kinesis.Kinesis, d *schema.ResourceData) error {
+
+	sn := d.Get("name").(string)
+
+	if d.HasChange("tags") {
+		oraw, nraw := d.GetChange("tags")
+		o := oraw.(map[string]interface{})
+		n := nraw.(map[string]interface{})
+		create, remove := diffTagsKinesis(tagsFromMapKinesis(o), tagsFromMapKinesis(n))
+
+		// Set tags
+		if len(remove) > 0 {
+			log.Printf("[DEBUG] Removing tags: %#v", remove)
+			k := make([]*string, len(remove), len(remove))
+			for i, t := range remove {
+				k[i] = t.Key
+			}
+
+			_, err := conn.RemoveTagsFromStream(&kinesis.RemoveTagsFromStreamInput{
+				StreamName: aws.String(sn),
+				TagKeys:    k,
+			})
+			if err != nil {
+				return err
+			}
+		}
+
+		if len(create) > 0 {
+
+			log.Printf("[DEBUG] Creating tags: %#v", create)
+			t := make(map[string]*string)
+			for _, tag := range create {
+				t[*tag.Key] = tag.Value
+			}
+
+			_, err := conn.AddTagsToStream(&kinesis.AddTagsToStreamInput{
+				StreamName: aws.String(sn),
+				Tags:       t,
+			})
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// diffTagsKinesis takes our tags locally and the ones remotely and returns
+// the set of tags that must be created, and the set of tags that must
+// be destroyed.
+func diffTagsKinesis(oldTags, newTags []*kinesis.Tag) ([]*kinesis.Tag, []*kinesis.Tag) {
+	// First, we're creating everything we have
+	create := make(map[string]interface{})
+	for _, t := range newTags {
+		create[*t.Key] = *t.Value
+	}
+
+	// Build the list of what to remove
+	var remove []*kinesis.Tag
+	for _, t := range oldTags {
+		old, ok := create[*t.Key]
+		if !ok || old != *t.Value {
+			// Delete it!
+			remove = append(remove, t)
+		}
+	}
+
+	return tagsFromMapKinesis(create), remove
+}
+
+// tagsFromMapKinesis returns the tags for the given map of data.
+func tagsFromMapKinesis(m map[string]interface{}) []*kinesis.Tag {
+	var result []*kinesis.Tag
+	for k, v := range m {
+		result = append(result, &kinesis.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v.(string)),
+		})
+	}
+
+	return result
+}
+
+// tagsToMapKinesis turns the list of tags into a map.
+func tagsToMapKinesis(ts []*kinesis.Tag) map[string]string {
+	result := make(map[string]string)
+	for _, t := range ts {
+		result[*t.Key] = *t.Value
+	}
+
+	return result
+}
@@ -0,0 +1,84 @@
+package aws
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/service/kinesis"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+func TestDiffTagsKinesis(t *testing.T) {
+	cases := []struct {
+		Old, New       map[string]interface{}
+		Create, Remove map[string]string
+	}{
+		// Basic add/remove
+		{
+			Old: map[string]interface{}{
+				"foo": "bar",
+			},
+			New: map[string]interface{}{
+				"bar": "baz",
+			},
+			Create: map[string]string{
+				"bar": "baz",
+			},
+			Remove: map[string]string{
+				"foo": "bar",
+			},
+		},
+
+		// Modify
+		{
+			Old: map[string]interface{}{
+				"foo": "bar",
+			},
+			New: map[string]interface{}{
+				"foo": "baz",
+			},
+			Create: map[string]string{
+				"foo": "baz",
+			},
+			Remove: map[string]string{
+				"foo": "bar",
+			},
+		},
+	}
+
+	for i, tc := range cases {
+		c, r := diffTagsKinesis(tagsFromMapKinesis(tc.Old), tagsFromMapKinesis(tc.New))
+		cm := tagsToMapKinesis(c)
+		rm := tagsToMapKinesis(r)
+		if !reflect.DeepEqual(cm, tc.Create) {
+			t.Fatalf("%d: bad create: %#v", i, cm)
+		}
+		if !reflect.DeepEqual(rm, tc.Remove) {
+			t.Fatalf("%d: bad remove: %#v", i, rm)
+		}
+	}
+}
+
+// testAccCheckKinesisTags can be used to check the tags on a resource.
+func testAccCheckKinesisTags(ts []*kinesis.Tag, key string, value string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		m := tagsToMapKinesis(ts)
+		v, ok := m[key]
+		if value != "" && !ok {
+			return fmt.Errorf("Missing tag: %s", key)
+		} else if value == "" && ok {
+			return fmt.Errorf("Extra tag: %s", key)
+		}
+		if value == "" {
+			return nil
+		}
+
+		if v != value {
+			return fmt.Errorf("%s: bad value: %s", key, v)
+		}
+
+		return nil
+	}
+}
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="UTF-8"?><md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" xmlns:ds="http://www.w3.org/2000/09/xmldsig#" entityID="https://terraform2-dev-ed.my.salesforce.com" validUntil="2025-09-02T18:27:19.710Z">
+  <md:IDPSSODescriptor protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">
+    <md:KeyDescriptor use="signing">
+      <ds:KeyInfo>
+        <ds:X509Data>
+          <ds:X509Certificate>MIIErDCCA5SgAwIBAgIOAU+PT8RBAAAAAHxJXEcwDQYJKoZIhvcNAQELBQAwgZAxKDAmBgNVBAMMH1NlbGZTaWduZWRDZXJ0XzAyU2VwMjAxNV8xODI2NTMxGDAWBgNVBAsMDzAwRDI0MDAwMDAwcEFvQTEXMBUGA1UECgwOU2FsZXNmb3JjZS5jb20xFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xCzAJBgNVBAgMAkNBMQwwCgYDVQQGEwNVU0EwHhcNMTUwOTAyMTgyNjUzWhcNMTcwOTAyMTIwMDAwWjCBkDEoMCYGA1UEAwwfU2VsZlNpZ25lZENlcnRfMDJTZXAyMDE1XzE4MjY1MzEYMBYGA1UECwwPMDBEMjQwMDAwMDBwQW9BMRcwFQYDVQQKDA5TYWxlc2ZvcmNlLmNvbTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzELMAkGA1UECAwCQ0ExDDAKBgNVBAYTA1VTQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJp/wTRr9n1IWJpkRTjNpep47OKJrD2E6rGbJ18TG2RxtIz+zCn2JwH2aP3TULh0r0hhcg/pecv51RRcG7O19DBBaTQ5+KuoICQyKZy07/yDXSiZontTwkEYs06ssTwTHUcRXbcwTKv16L7omt0MjIhTTGfvtLOYiPwyvKvzAHg4eNuAcli0duVM78UIBORtdmy9C9ZcMh8yRJo5aPBq85wsE3JXU58ytyZzCHTBLH+2xFQrjYnUSEW+FOEEpI7o33MVdFBvWWg1R17HkWzcve4C30lqOHqvxBzyESZ/N1mMlmSt8gPFyB+mUXY99StJDJpnytbY8DwSzMQUo/sOVB0CAwEAAaOCAQAwgf0wHQYDVR0OBBYEFByu1EQqRQS0bYQBKS9K5qwKi+6IMA8GA1UdEwEB/wQFMAMBAf8wgcoGA1UdIwSBwjCBv4AUHK7URCpFBLRthAEpL0rmrAqL7oihgZakgZMwgZAxKDAmBgNVBAMMH1NlbGZTaWduZWRDZXJ0XzAyU2VwMjAxNV8xODI2NTMxGDAWBgNVBAsMDzAwRDI0MDAwMDAwcEFvQTEXMBUGA1UECgwOU2FsZXNmb3JjZS5jb20xFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xCzAJBgNVBAgMAkNBMQwwCgYDVQQGEwNVU0GCDgFPj0/EQQAAAAB8SVxHMA0GCSqGSIb3DQEBCwUAA4IBAQA9O5o1tC71qJnkq+ABPo4A1aFKZVT/07GcBX4/wetcbYySL4Q2nR9pMgfPYYS1j+P2E3viPsQwPIWDUBwFkNsjjX5DSGEkLAioVGKRwJshRSCSynMcsVZbQkfBUiZXqhM0wzvoa/ALvGD+aSSb1m+x7lEpDYNwQKWaUW2VYcHWv9wjujMyy7dlj8E/jqM71mw7ThNl6k4+3RQ802dMa14txm8pkF0vZgfpV3tkqhBqtjBAicVCaveqr3r3iGqjvyilBgdY+0NR8szqzm7CD/Bkb22+/IgM/mXQuL9KHD/WADlSGmYKmG3SSahmcZxznYCnzcRNN9LVuXlz5cbljmBj</ds:X509Certificate>
+        </ds:X509Data>
+      </ds:KeyInfo>
+    </md:KeyDescriptor>
+    <md:NameIDFormat>urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified</md:NameIDFormat>
+    <md:SingleSignOnService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" Location="https://terraform2-dev-ed.my.salesforce.com/idp/endpoint/HttpPost"/>
+    <md:SingleSignOnService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="https://terraform2-dev-ed.my.salesforce.com/idp/endpoint/HttpRedirect"/>
+  </md:IDPSSODescriptor>
+</md:EntityDescriptor>
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="UTF-8"?><md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" xmlns:ds="http://www.w3.org/2000/09/xmldsig#" entityID="https://terraform-dev-ed.my.salesforce.com" validUntil="2025-09-02T18:27:19.710Z">
+  <md:IDPSSODescriptor protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">
+    <md:KeyDescriptor use="signing">
+      <ds:KeyInfo>
+        <ds:X509Data>
+          <ds:X509Certificate>MIIErDCCA5SgAwIBAgIOAU+PT8RBAAAAAHxJXEcwDQYJKoZIhvcNAQELBQAwgZAxKDAmBgNVBAMMH1NlbGZTaWduZWRDZXJ0XzAyU2VwMjAxNV8xODI2NTMxGDAWBgNVBAsMDzAwRDI0MDAwMDAwcEFvQTEXMBUGA1UECgwOU2FsZXNmb3JjZS5jb20xFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xCzAJBgNVBAgMAkNBMQwwCgYDVQQGEwNVU0EwHhcNMTUwOTAyMTgyNjUzWhcNMTcwOTAyMTIwMDAwWjCBkDEoMCYGA1UEAwwfU2VsZlNpZ25lZENlcnRfMDJTZXAyMDE1XzE4MjY1MzEYMBYGA1UECwwPMDBEMjQwMDAwMDBwQW9BMRcwFQYDVQQKDA5TYWxlc2ZvcmNlLmNvbTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzELMAkGA1UECAwCQ0ExDDAKBgNVBAYTA1VTQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJp/wTRr9n1IWJpkRTjNpep47OKJrD2E6rGbJ18TG2RxtIz+zCn2JwH2aP3TULh0r0hhcg/pecv51RRcG7O19DBBaTQ5+KuoICQyKZy07/yDXSiZontTwkEYs06ssTwTHUcRXbcwTKv16L7omt0MjIhTTGfvtLOYiPwyvKvzAHg4eNuAcli0duVM78UIBORtdmy9C9ZcMh8yRJo5aPBq85wsE3JXU58ytyZzCHTBLH+2xFQrjYnUSEW+FOEEpI7o33MVdFBvWWg1R17HkWzcve4C30lqOHqvxBzyESZ/N1mMlmSt8gPFyB+mUXY99StJDJpnytbY8DwSzMQUo/sOVB0CAwEAAaOCAQAwgf0wHQYDVR0OBBYEFByu1EQqRQS0bYQBKS9K5qwKi+6IMA8GA1UdEwEB/wQFMAMBAf8wgcoGA1UdIwSBwjCBv4AUHK7URCpFBLRthAEpL0rmrAqL7oihgZakgZMwgZAxKDAmBgNVBAMMH1NlbGZTaWduZWRDZXJ0XzAyU2VwMjAxNV8xODI2NTMxGDAWBgNVBAsMDzAwRDI0MDAwMDAwcEFvQTEXMBUGA1UECgwOU2FsZXNmb3JjZS5jb20xFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xCzAJBgNVBAgMAkNBMQwwCgYDVQQGEwNVU0GCDgFPj0/EQQAAAAB8SVxHMA0GCSqGSIb3DQEBCwUAA4IBAQA9O5o1tC71qJnkq+ABPo4A1aFKZVT/07GcBX4/wetcbYySL4Q2nR9pMgfPYYS1j+P2E3viPsQwPIWDUBwFkNsjjX5DSGEkLAioVGKRwJshRSCSynMcsVZbQkfBUiZXqhM0wzvoa/ALvGD+aSSb1m+x7lEpDYNwQKWaUW2VYcHWv9wjujMyy7dlj8E/jqM71mw7ThNl6k4+3RQ802dMa14txm8pkF0vZgfpV3tkqhBqtjBAicVCaveqr3r3iGqjvyilBgdY+0NR8szqzm7CD/Bkb22+/IgM/mXQuL9KHD/WADlSGmYKmG3SSahmcZxznYCnzcRNN9LVuXlz5cbljmBj</ds:X509Certificate>
+        </ds:X509Data>
+      </ds:KeyInfo>
+    </md:KeyDescriptor>
+    <md:NameIDFormat>urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified</md:NameIDFormat>
+    <md:SingleSignOnService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" Location="https://terraform-dev-ed.my.salesforce.com/idp/endpoint/HttpPost"/>
+    <md:SingleSignOnService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="https://terraform-dev-ed.my.salesforce.com/idp/endpoint/HttpRedirect"/>
+  </md:IDPSSODescriptor>
+</md:EntityDescriptor>
@@ -98,7 +98,7 @@ func (c Client) getStorageServiceQueueClient(serviceName string) (storage.QueueServiceClient, error) {
 func (c *Config) NewClientFromSettingsData() (*Client, error) {
 	mc, err := management.ClientFromPublishSettingsData(c.Settings, c.SubscriptionID)
 	if err != nil {
-		return nil, nil
+		return nil, err
 	}
 
 	return &Client{
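
The one-word change above fixes a swallowed error: with `return nil, nil`, a bad publishsettings payload would hand callers a nil *Client and no error, most likely deferring the failure to a later nil-pointer dereference. The corrected code follows the standard Go shape:

    if err != nil {
        return nil, err // never pair a nil result with a nil error on failure
    }
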
@@ -64,22 +64,12 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
 		Certificate:    []byte(d.Get("certificate").(string)),
 	}
 
-	settings := d.Get("settings_file").(string)
-
-	if settings != "" {
-		if ok, _ := isFile(settings); ok {
-			settingsFile, err := homedir.Expand(settings)
-			if err != nil {
-				return nil, fmt.Errorf("Error expanding the settings file path: %s", err)
-			}
-			publishSettingsContent, err := ioutil.ReadFile(settingsFile)
-			if err != nil {
-				return nil, fmt.Errorf("Error reading settings file: %s", err)
-			}
-			config.Settings = publishSettingsContent
-		} else {
-			config.Settings = []byte(settings)
-		}
+	settingsFile := d.Get("settings_file").(string)
+	if settingsFile != "" {
+		// any errors from readSettings would have been caught at the validate
+		// step, so we can avoid handling them now
+		settings, _, _ := readSettings(settingsFile)
+		config.Settings = settings
 		return config.NewClientFromSettingsData()
 	}
 
@@ -92,31 +82,39 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
 		"or both a 'subscription_id' and 'certificate'.")
 }
 
-func validateSettingsFile(v interface{}, k string) (warnings []string, errors []error) {
+func validateSettingsFile(v interface{}, k string) ([]string, []error) {
 	value := v.(string)
 
 	if value == "" {
-		return
+		return nil, nil
 	}
 
-	var settings settingsData
-	if err := xml.Unmarshal([]byte(value), &settings); err != nil {
-		warnings = append(warnings, `
+	_, warnings, errors := readSettings(value)
+	return warnings, errors
+}
+
+const settingsPathWarnMsg = `
 settings_file is not valid XML, so we are assuming it is a file path. This
 support will be removed in the future. Please update your configuration to use
-${file("filename.publishsettings")} instead.`)
-	} else {
+${file("filename.publishsettings")} instead.`
+
+func readSettings(pathOrContents string) (s []byte, ws []string, es []error) {
+	var settings settingsData
+	if err := xml.Unmarshal([]byte(pathOrContents), &settings); err == nil {
+		s = []byte(pathOrContents)
 		return
 	}
 
-	if ok, err := isFile(value); !ok {
-		errors = append(errors,
-			fmt.Errorf(
-				"account_file path could not be read from '%s': %s",
-				value,
-				err))
+	ws = append(ws, settingsPathWarnMsg)
+	path, err := homedir.Expand(pathOrContents)
+	if err != nil {
+		es = append(es, fmt.Errorf("Error expanding path: %s", err))
+		return
 	}
+
+	s, err = ioutil.ReadFile(path)
+	if err != nil {
+		es = append(es, fmt.Errorf("Could not read file '%s': %s", path, err))
+	}
 	return
 }
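
A sketch, not part of the diff, of the two input forms readSettings now handles for settings_file:

    // Inline publishsettings XML: returned as-is, no warnings, no errors.
    s, ws, es := readSettings("<PublishData></PublishData>")

    // A file path, optionally ~-prefixed: expanded and read from disk, with a
    // deprecation warning appended to ws; read failures are collected in es.
    s, ws, es = readSettings("~/terraform.publishsettings")
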
@@ -3,12 +3,14 @@ package azure
 import (
 	"io"
 	"io/ioutil"
-	"log"
 	"os"
+	"strings"
 	"testing"
 
+	"github.com/hashicorp/terraform/config"
 	"github.com/hashicorp/terraform/helper/schema"
 	"github.com/hashicorp/terraform/terraform"
+	"github.com/mitchellh/go-homedir"
 )
 
 var testAccProviders map[string]terraform.ResourceProvider
@@ -67,20 +69,33 @@ func TestAzure_validateSettingsFile(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Error creating temporary file in TestAzure_validateSettingsFile: %s", err)
 	}
+	defer os.Remove(f.Name())
 
 	fx, err := ioutil.TempFile("", "tf-test-xml")
 	if err != nil {
 		t.Fatalf("Error creating temporary file with XML in TestAzure_validateSettingsFile: %s", err)
 	}
+	defer os.Remove(fx.Name())
+
+	home, err := homedir.Dir()
+	if err != nil {
+		t.Fatalf("Error fetching homedir: %s", err)
+	}
+	fh, err := ioutil.TempFile(home, "tf-test-home")
+	if err != nil {
+		t.Fatalf("Error creating homedir-based temporary file: %s", err)
+	}
+	defer os.Remove(fh.Name())
 
 	_, err = io.WriteString(fx, "<PublishData></PublishData>")
 	if err != nil {
 		t.Fatalf("Error writing XML File: %s", err)
 	}
-
-	log.Printf("fx name: %s", fx.Name())
 	fx.Close()
 
+	r := strings.NewReplacer(home, "~")
+	homePath := r.Replace(fh.Name())
+
 	cases := []struct {
 		Input string // String of XML or a path to an XML file
 		W     int    // expected count of warnings
@@ -89,6 +104,7 @@ func TestAzure_validateSettingsFile(t *testing.T) {
 		{"test", 1, 1},
 		{f.Name(), 1, 0},
 		{fx.Name(), 1, 0},
+		{homePath, 1, 0},
 		{"<PublishData></PublishData>", 0, 0},
 	}
@@ -104,6 +120,53 @@ func TestAzure_validateSettingsFile(t *testing.T) {
 		}
 	}
 }
+
+func TestAzure_providerConfigure(t *testing.T) {
+	home, err := homedir.Dir()
+	if err != nil {
+		t.Fatalf("Error fetching homedir: %s", err)
+	}
+	fh, err := ioutil.TempFile(home, "tf-test-home")
+	if err != nil {
+		t.Fatalf("Error creating homedir-based temporary file: %s", err)
+	}
+	defer os.Remove(fh.Name())
+
+	_, err = io.WriteString(fh, testAzurePublishSettingsStr)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	fh.Close()
+
+	r := strings.NewReplacer(home, "~")
+	homePath := r.Replace(fh.Name())
+
+	cases := []struct {
+		SettingsFile string // String of XML or a path to an XML file
+		NilMeta      bool   // whether meta is expected to be nil
+	}{
+		{testAzurePublishSettingsStr, false},
+		{homePath, false},
+	}
+
+	for _, tc := range cases {
+		rp := Provider()
+		raw := map[string]interface{}{
+			"settings_file": tc.SettingsFile,
+		}
+
+		rawConfig, err := config.NewRawConfig(raw)
+		if err != nil {
+			t.Fatalf("err: %s", err)
+		}
+
+		err = rp.Configure(terraform.NewResourceConfig(rawConfig))
+		meta := rp.(*schema.Provider).Meta()
+		if (meta == nil) != tc.NilMeta {
+			t.Fatalf("expected NilMeta: %t, got meta: %#v", tc.NilMeta, meta)
+		}
+	}
+}
+
 func TestAzure_isFile(t *testing.T) {
 	f, err := ioutil.TempFile("", "tf-test-file")
 	if err != nil {
@@ -129,3 +192,19 @@ func TestAzure_isFile(t *testing.T) {
 		}
 	}
 }
+
+// testAzurePublishSettingsStr is a revoked publishsettings file
+const testAzurePublishSettingsStr = `
+<?xml version="1.0" encoding="utf-8"?>
+<PublishData>
+  <PublishProfile
+    SchemaVersion="2.0"
+    PublishMethod="AzureServiceManagementAPI">
+    <Subscription
+      ServiceManagementUrl="https://management.core.windows.net"
+      Id="a65bf94f-26b3-4fb1-9d50-6e27c6096df1"
+      Name="terraform-testing"
+      ManagementCertificate="MIIJ/AIBAzCCCbwGCSqGSIb3DQEHAaCCCa0EggmpMIIJpTCCBe4GCSqGSIb3DQEHAaCCBd8EggXbMIIF1zCCBdMGCyqGSIb3DQEMCgECoIIE7jCCBOowHAYKKoZIhvcNAQwBAzAOBAhqcsGLGr+LsQICB9AEggTImIsD3qDkT8IkH4qOlRanUFVQIWCUfXBf5U0QnXS/7N2a5fOeSou0dFuxXg81emaxecr8Myge9rBMHvLi2m4h9JIah6K/33hJhGu3nwiu+n7MzjwpjOfkc4tFMUqD1m/TAF32feq3hYqDjc3FLHrAXNIsrvaucmPipsfT/sq29xC6cWN1sUw6X43F18rqqDKyyGUuEMOJwK9s2Vir/oXlzl6bspVRJHCf0Yyo5+2GWhgcEWjzAOjIZCF7iciYj75aG3mUZjcJYT5DqUQyiyKD/LjWhiYkmHRioaCo4amyrCX92uFuZMIlHOk4LhU+UCyTn/dsvavdj8IH146u/5tUxOIsjP5hN3CcZS/TlMvX9W74uGr5BBs7EWvccUCrYyhmhFOl0YY2+99wob3VOUDSEF73VerYpFEM5POxFzjBj8K7NleB8lEuSjJXn9FbYVUpcZ/u1qhAYewFgf7KBWUTKPjGuf1b8nRVndSIaLyxSZOVbCfUtlAindZoBWjGzCa0opie1axZgouObFxHeo7ZJGjiO2q73YrZOqpPB0zOi/sycadHRKBp4O2Svz4WXBKqa2RV9oM4PYrRnH51cdFmCFqQ4eKGJCnc/Kzdwlt6ldMiCV6gsHTm44NcfPwZW5ivKZPG5aM2mad4rPpQiX+6dQz/ForKZj3WpwI+UIchc7fhwvKykCRpH/GLDBKVrjgWioKHcTDRiqOimCjLkJA+u4Qg/qHKkMOIyr75zfTEw7S9MTiYPSEnFJO60pt3rRrMU99N1Jw07Ld24SsmK4iZExLGFxYKh6jkBWV6CgqWg7qHHH3j1MZTarZSa4W1QdLjwxCQxIPU1O4L5xEa2Ki1prJyDp2E49mo6r2LDkwJrTP9GSvyGBoEpkpKVzgHsRtotikcNetsdlfDCnJiYs2Z6IvcQ2sCZaQXlofVoHZxI3OJUNvulLtuX0L8XedZtbgoFKX1u1KcgRBpae6/S+4VAjB03R+kELoC9BzicBJMifHhpOZuWqhD4zfWq1WQvBqiHI1M0GB5RDtDxxQ0IhdDJavDU6NrgNBQGxfAv1TFd/y/Mvyaq94n1+LrN0joSrxWL6QyxZF4fECGHCf0FDOHSJovkrpc6Fbc4a5mfnzIzsVeLa+m40/3rwkqs+vISCGiVwKd5xmLCmkRrae97Qh/tVRVgpFtRiUOgYVfYulhqURW4fV76giLEZydWvJUkpBxn0LNgpSHO6NJGNHtC1PoSkLEFVae1OVZaAIcshdfssCuVkuZWA3ujxkcnvzQ5uKUyRb6jF3+ID+lqzTh8hY+R+h7iLf7WRICuVedxbNa+TS+bO0/mG90eZo7An1naWy4ty9Hpn+uhKdJ3NpY8LWFZbWkHBF7IHbvlzG59GRmwJWts69y95BiqMWn4wW+1QdAdRL3WvOoMV9McVi/RQVxNskpZ65HiIq4L4VgIgx1G7Yd37zQqDEoBIxLXEq84tyXl1UVmYSt68MFBOPklUtqSiLaDgues2+l+iRjqhsDgsfZXTttxMig6W5WDsOl+xlYt+XaSiLIomjCmCy52cVlhhRjDV92Wl+RTRfi6YlHFeKtnPL1MjuIrz4c+f4PQ4JIn5TRselc6LNTbopr+DinUlz/odjM492AMYHRMBMGCSqGSIb3DQEJFTEGBAQBAAAAMFsGCSqGSIb3DQEJFDFOHkwAewA0AEQAMQBBADYANABFADQALQAzADcAQgBGAC0ANAA5AEYAMgAtADkAOQBEADEALQA1ADkANgBGAEEAMgAzADUARgBBAEQAMQB9MF0GCSsGAQQBgjcRATFQHk4ATQBpAGMAcgBvAHMAbwBmAHQAIABTAG8AZgB0AHcAYQByAGUAIABLAGUAeQAgAFMAdABvAHIAYQBnAGUAIABQAHIAbwB2AGkAZABlAHIwggOvBgkqhkiG9w0BBwagggOgMIIDnAIBADCCA5UGCSqGSIb3DQEHATAcBgoqhkiG9w0BDAEGMA4ECPLFyVJDDpzhAgIH0ICCA2hEt7WDTT87EBsNidgZgcuPvMH41IqmN3dd7Vk6RKwwO8dnLGzD6sCA9sLaQ3uKeX9WrxnBbrIzqk4yq6RRPBhW9Gegs85oldLfBsDFpyD4Wi4tQ6LBkH20/Ziy+vPZbZXDlCrrF75ruhtBQrLgtEJ6b/fj9MBw336917A9ALXKa8qcIykq5lBKTz1gRITUkIl35Ylb3kl6wB8L1hSq7jf0tuqMTREI33T3WCn8oBEPdVlgR5L4D6yVGlp62ogUnfFJ8C1V6vLiE45Z9w3ttxi3WCsG/rqz/pWkY2ctGE4Mv5ORuqwZDSChK60DbkfANpdUzqgb+Lw39CLAnmkfQMuZVJyAs/PV65yuVFmdfy5n+m2YzQNLztbsYhdyYHVrgTNrAEsy+3N0OhT3OKschHMoN4YPyu09gxHQWXuSo3X8HvoBHD6NeJ6FIdu1NJx3qCrVJPREMX30Zf6AmmWe3aIFjDz351bIc0Rc4YDAc1RRf1A/JDzeYRZrPDwdbJAj/g4oBEeZEdSmcNFxc42Tz5igTaJWyxHOkAU2iRGU17xb2diVUSCfbVsUwfiSQNcOArMl+JvLfvZp9Ye5lhZKrgTQbWdrDm9jvtCyzAxBILjjBdmQJEoJth9WlgS3ASVxarO194cqjlRvTmmNZ8kdOLt1Ybr2ZlAG2g3gOn7NQeEzyd8WBcxVCGiEyeJBvqpVSMnDGJ4VLHXsiknstr42USzAQN+t7cLOJ+J2Y0phgZ87oAixJnpEoz8z/Z65VV5syREeubiHK5uQmz3pc5qL/5LbYNT1ZqXWbDO+HXpTFJwbZ2DubNjSG1zrGNctzoRuhQidTOepyMvnlJN1PfKZoIQcA+G6PHkrNnBqo13tE9faQA8x2gvOoQYGSFi95UGlc4sTXER0+EbOCYwXkUGatQSlMLpfVXrMkRwlO6g9rC63LZC7ubqqzPPlQwdwbHTMEDxZ5ZsO21RT1JIiXfQEu/gp+HAL+Xqbsiq3Q4CCKTh04mV0Dj4a+kg6XU6BETgdwSjBbxxsbhK7yc0jlgGrNXvC72Ua7IN19zcwsrvwqtkVSc850/i1qQf066h1g/5i5Co7eIgAdRT1/S4nw5CBYGsgr5bl1ZAB2OmmkEiZqYYi3LdeYgr2yK5XcwrcPcOCWv/iN5AHhpgPqzA3MB8wBwYFKw4DAhoEFCcvtRx98fW7DF3rONM5nagH2ffMBBQi0PdBdLzm4i8p2Dhdjj4Vi0whig==" />
+  </PublishProfile>
+</PublishData>
+`
@@ -13,7 +13,6 @@ func resourceAzureStorageBlob() *schema.Resource {
 	return &schema.Resource{
 		Create: resourceAzureStorageBlobCreate,
 		Read:   resourceAzureStorageBlobRead,
-		Update: resourceAzureStorageBlobUpdate,
 		Exists: resourceAzureStorageBlobExists,
 		Delete: resourceAzureStorageBlobDelete,
@@ -122,17 +121,6 @@ func resourceAzureStorageBlobRead(d *schema.ResourceData, meta interface{}) error {
 	return nil
 }
 
-// resourceAzureStorageBlobUpdate does all the necessary API calls to
-// update a blob on Azure.
-func resourceAzureStorageBlobUpdate(d *schema.ResourceData, meta interface{}) error {
-	// NOTE: although empty as most parameters have ForceNew set; this is
-	// still required in case of changes to the storage_service_key
-
-	// run the ExistsFunc beforehand to ensure the resource's existence nonetheless:
-	_, err := resourceAzureStorageBlobExists(d, meta)
-	return err
-}
-
 // resourceAzureStorageBlobExists does all the necessary API calls to
 // check for the existence of the blob on Azure.
 func resourceAzureStorageBlobExists(d *schema.ResourceData, meta interface{}) (bool, error) {
@@ -32,18 +32,18 @@ func testSetValueOnResourceData(t *testing.T) {
 	d := schema.ResourceData{}
 	d.Set("id", "name")
 
-	setValueOrUUID(&d, "id", "name", "54711781-274e-41b2-83c0-17194d0108f7")
+	setValueOrID(&d, "id", "name", "54711781-274e-41b2-83c0-17194d0108f7")
 
 	if d.Get("id").(string) != "name" {
 		t.Fatal("err: 'id' does not match 'name'")
 	}
 }
 
-func testSetUUIDOnResourceData(t *testing.T) {
+func testSetIDOnResourceData(t *testing.T) {
 	d := schema.ResourceData{}
 	d.Set("id", "54711781-274e-41b2-83c0-17194d0108f7")
 
-	setValueOrUUID(&d, "id", "name", "54711781-274e-41b2-83c0-17194d0108f7")
+	setValueOrID(&d, "id", "name", "54711781-274e-41b2-83c0-17194d0108f7")
 
 	if d.Get("id").(string) != "54711781-274e-41b2-83c0-17194d0108f7" {
 		t.Fatal("err: 'id' does not match '54711781-274e-41b2-83c0-17194d0108f7'")
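
The UUID-to-ID rename in this and the following hunks reflects that CloudStack identifiers are not guaranteed to be UUIDs; the helpers otherwise behave as before. A hedged sketch of the pair's intended use, with semantics inferred from the call sites below:

    // retrieveID resolves whatever the practitioner configured (a name or a
    // raw ID) to the ID the API needs.
    zoneid, e := retrieveID(cs, "zone", d.Get("zone").(string))

    // setValueOrID does the reverse on read: it keeps the human-readable
    // name in state when the resource was configured by name, and stores
    // the raw ID otherwise.
    setValueOrID(d, "zone", v.Zonename, v.Zoneid)
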
@@ -80,12 +80,12 @@ func resourceCloudStackDiskCreate(d *schema.ResourceData, meta interface{}) error {
 	// Create a new parameter struct
 	p := cs.Volume.NewCreateVolumeParams(name)
 
-	// Retrieve the disk_offering UUID
-	diskofferingid, e := retrieveUUID(cs, "disk_offering", d.Get("disk_offering").(string))
+	// Retrieve the disk_offering ID
+	diskofferingid, e := retrieveID(cs, "disk_offering", d.Get("disk_offering").(string))
 	if e != nil {
 		return e.Error()
 	}
-	// Set the disk_offering UUID
+	// Set the disk_offering ID
 	p.SetDiskofferingid(diskofferingid)
 
 	if d.Get("size").(int) != 0 {
@@ -95,8 +95,8 @@ func resourceCloudStackDiskCreate(d *schema.ResourceData, meta interface{}) error {
 
 	// If there is a project supplied, we retrieve and set the project id
 	if project, ok := d.GetOk("project"); ok {
-		// Retrieve the project UUID
-		projectid, e := retrieveUUID(cs, "project", project.(string))
+		// Retrieve the project ID
+		projectid, e := retrieveID(cs, "project", project.(string))
 		if e != nil {
 			return e.Error()
 		}
@@ -104,8 +104,8 @@ func resourceCloudStackDiskCreate(d *schema.ResourceData, meta interface{}) error {
 		p.SetProjectid(projectid)
 	}
 
-	// Retrieve the zone UUID
-	zoneid, e := retrieveUUID(cs, "zone", d.Get("zone").(string))
+	// Retrieve the zone ID
+	zoneid, e := retrieveID(cs, "zone", d.Get("zone").(string))
 	if e != nil {
 		return e.Error()
 	}
@@ -118,7 +118,7 @@ func resourceCloudStackDiskCreate(d *schema.ResourceData, meta interface{}) error {
 		return fmt.Errorf("Error creating the new disk %s: %s", name, err)
 	}
 
-	// Set the volume UUID and partials
+	// Set the volume ID and partials
 	d.SetId(r.Id)
 	d.SetPartial("name")
 	d.SetPartial("device")
@@ -160,9 +160,9 @@ func resourceCloudStackDiskRead(d *schema.ResourceData, meta interface{}) error {
 	d.Set("attach", v.Attached != "")           // If attached this will contain a timestamp when attached
 	d.Set("size", int(v.Size/(1024*1024*1024))) // Needed to get GB's again
 
-	setValueOrUUID(d, "disk_offering", v.Diskofferingname, v.Diskofferingid)
-	setValueOrUUID(d, "project", v.Project, v.Projectid)
-	setValueOrUUID(d, "zone", v.Zonename, v.Zoneid)
+	setValueOrID(d, "disk_offering", v.Diskofferingname, v.Diskofferingid)
+	setValueOrID(d, "project", v.Project, v.Projectid)
+	setValueOrID(d, "zone", v.Zonename, v.Zoneid)
 
 	if v.Attached != "" {
 		// Get the virtual machine details
@@ -184,7 +184,7 @@ func resourceCloudStackDiskRead(d *schema.ResourceData, meta interface{}) error {
 		}
 
 		d.Set("device", retrieveDeviceName(v.Deviceid, c.Name))
-		setValueOrUUID(d, "virtual_machine", v.Vmname, v.Virtualmachineid)
+		setValueOrID(d, "virtual_machine", v.Vmname, v.Virtualmachineid)
 	}
 
 	return nil
@@ -205,13 +205,13 @@ func resourceCloudStackDiskUpdate(d *schema.ResourceData, meta interface{}) error {
 		// Create a new parameter struct
 		p := cs.Volume.NewResizeVolumeParams(d.Id())
 
-		// Retrieve the disk_offering UUID
-		diskofferingid, e := retrieveUUID(cs, "disk_offering", d.Get("disk_offering").(string))
+		// Retrieve the disk_offering ID
+		diskofferingid, e := retrieveID(cs, "disk_offering", d.Get("disk_offering").(string))
 		if e != nil {
 			return e.Error()
 		}
 
-		// Set the disk_offering UUID
+		// Set the disk_offering ID
 		p.SetDiskofferingid(diskofferingid)
 
 		if d.Get("size").(int) != 0 {
@@ -228,7 +228,7 @@ func resourceCloudStackDiskUpdate(d *schema.ResourceData, meta interface{}) error {
 			return fmt.Errorf("Error changing disk offering/size for disk %s: %s", name, err)
 		}
 
-		// Update the volume UUID and set partials
+		// Update the volume ID and set partials
 		d.SetId(r.Id)
 		d.SetPartial("disk_offering")
 		d.SetPartial("size")
@@ -278,7 +278,7 @@ func resourceCloudStackDiskDelete(d *schema.ResourceData, meta interface{}) error {
 
 	// Delete the volume
 	if _, err := cs.Volume.DeleteVolume(p); err != nil {
-		// This is a very poor way to be told the UUID does no longer exist :(
+		// This is a very poor way to be told the ID does no longer exist :(
 		if strings.Contains(err.Error(), fmt.Sprintf(
 			"Invalid parameter id value=%s due to incorrect long value format, "+
 				"or entity does not exist", d.Id())) {
@@ -299,8 +299,8 @@ func resourceCloudStackDiskAttach(d *schema.ResourceData, meta interface{}) error {
 		return err
 	}
 
-	// Retrieve the virtual_machine UUID
-	virtualmachineid, e := retrieveUUID(cs, "virtual_machine", d.Get("virtual_machine").(string))
+	// Retrieve the virtual_machine ID
+	virtualmachineid, e := retrieveID(cs, "virtual_machine", d.Get("virtual_machine").(string))
 	if e != nil {
 		return e.Error()
 	}
@@ -341,13 +341,13 @@ func resourceCloudStackDiskDetach(d *schema.ResourceData, meta interface{}) error {
 	// Create a new parameter struct
 	p := cs.Volume.NewDetachVolumeParams()
 
-	// Set the volume UUID
+	// Set the volume ID
 	p.SetId(d.Id())
 
 	// Detach the currently attached volume
 	if _, err := cs.Volume.DetachVolume(p); err != nil {
-		// Retrieve the virtual_machine UUID
-		virtualmachineid, e := retrieveUUID(cs, "virtual_machine", d.Get("virtual_machine").(string))
+		// Retrieve the virtual_machine ID
+		virtualmachineid, e := retrieveID(cs, "virtual_machine", d.Get("virtual_machine").(string))
 		if e != nil {
 			return e.Error()
 		}
@@ -89,8 +89,8 @@ func resourceCloudStackEgressFirewallCreate(d *schema.ResourceData, meta interface{}) error {
 		return err
 	}
 
-	// Retrieve the network UUID
-	networkid, e := retrieveUUID(cs, "network", d.Get("network").(string))
+	// Retrieve the network ID
+	networkid, e := retrieveID(cs, "network", d.Get("network").(string))
 	if e != nil {
 		return e.Error()
 	}
@@ -222,7 +222,7 @@ func resourceCloudStackEgressFirewallRead(d *schema.ResourceData, meta interface{}) error {
 
 		// Get the rule
 		r, count, err := cs.Firewall.GetEgressFirewallRuleByID(id.(string))
-		// If the count == 0, there is no object found for this UUID
+		// If the count == 0, there is no object found for this ID
 		if err != nil {
 			if count == 0 {
 				delete(uuids, "icmp")
@@ -415,7 +415,7 @@ func resourceCloudStackEgressFirewallDeleteRule(
 	// Delete the rule
 	if _, err := cs.Firewall.DeleteEgressFirewallRule(p); err != nil {
 
-		// This is a very poor way to be told the UUID does no longer exist :(
+		// This is a very poor way to be told the ID does no longer exist :(
 		if strings.Contains(err.Error(), fmt.Sprintf(
 			"Invalid parameter id value=%s due to incorrect long value format, "+
 				"or entity does not exist", id.(string))) {