Merge remote-tracking branch 'hashicorp/master' into website-update

Jason Costello 2016-10-31 15:26:47 -07:00
commit 64b75faec4
1187 changed files with 130449 additions and 10324 deletions


@ -1,7 +1,7 @@
sudo: false
language: go
go:
- 1.7
- 1.7.3
install:
# This script is used by the Travis build to install a cookie for
# go.googlesource.com so rate limits are higher when using `go get` to fetch
@ -10,6 +10,7 @@ install:
- bash scripts/gogetcookie.sh
script:
- make test vet
- make test TEST=./terraform TESTARGS=-Xnew-apply
branches:
only:
- master


@ -1,55 +1,268 @@
## 0.7.5 (Unreleased)
## 0.7.8 (Unreleased)
BACKWARDS INCOMPATIBILITIES / NOTES:
* provider/openstack: The OpenStack provider has switched to the new Gophercloud SDK.
No front-facing changes were made, but please be aware that there might be bugs.
Please report any bugs you find.
* `archive_file` is now a data source, instead of a resource [GH-8492]
FEATURES:
* **New Provider:** `bitbucket` [GH-7405]
* **New Resource:** `aws_api_gateway_client_certificate` [GH-8775]
* **Experimental new apply graph:** `terraform apply` is getting a new graph
creation process for 0.8. This is now available behind a flag `-Xnew-apply`
(on any command). This will become the default in 0.8. There may still be
bugs. [GH-9388]
* **Experimental new destroy graph:** `terraform destroy` is also getting
a new graph creation process for 0.8. This is now available behind a flag
`-Xnew-destroy`. This will become the default in 0.8. [GH-9527]
* **New Provider:** `pagerduty` [GH-9022]
* **New Resource:** `aws_iam_user_login_profile` [GH-9605]
* **New Resource:** `aws_waf_ipset` [GH-8852]
* **New Resource:** `aws_waf_rule` [GH-8852]
* **New Resource:** `aws_waf_web_acl` [GH-8852]
* **New Resource:** `aws_waf_byte_match_set` [GH-9681]
* **New Resource:** `aws_waf_size_constraint_set` [GH-9689]
* **New Resource:** `aws_waf_sql_injection_match_set` [GH-9709]
* **New Resource:** `aws_waf_xss_match_set` [GH-9710]
* **New Resource:** `aws_ssm_activation` [GH-9111]
* **New Resource:** `azurerm_key_vault` [GH-9478]
* **New Resource:** `azurerm_storage_share` [GH-8674]
* **New Resource:** `azurerm_eventhub_namespace` [GH-9297]
* **New Resource:** `cloudstack_security_group` [GH-9103]
* **New Resource:** `cloudstack_security_group_rule` [GH-9645]
* **New Resource:** `cloudstack_private_gateway` [GH-9637]
* **New Resource:** `cloudstack_static_route` [GH-9637]
* **New DataSource:** `aws_prefix_list` [GH-9566]
* **New DataSource:** `aws_security_group` [GH-9604]
* **New DataSource:** `azurerm_client_config` [GH-9478]
* **New Interpolation Function:** `ceil` [GH-9692]
* **New Interpolation Function:** `floor` [GH-9692]
* **New Interpolation Function:** `min` [GH-9692]
* **New Interpolation Function:** `max` [GH-9692]
* **New Interpolation Function:** `title` [GH-9087]
* **New Interpolation Function:** `zipmap` [GH-9627]
IMPROVEMENTS:
* provider/aws: Add JSON validation to the `aws_cloudwatch_event_rule` resource [GH-8897]
* provider/aws: S3 bucket policies are imported as separate resources [GH-8915]
* provider/aws: S3 bucket policies can now be removed via the `aws_s3_bucket` resource [GH-8915]
* provider/aws: Added a cluster_address attribute to aws elasticache [GH-8935]
* provider/aws: Add JSON validation to the aws_elasticsearch_domain resource. [GH-8898]
* provider/aws: Add JSON validation to the aws_kms_key resource. [GH-8900]
* provider/aws: Add JSON validation to the aws_s3_bucket_policy resource. [GH-8901]
* provider/aws: Add JSON validation to the aws_sns_topic resource. [GH-8902]
* provider/aws: Add JSON validation to the aws_sns_topic_policy resource. [GH-8903]
* provider/aws: Add JSON validation to the aws_sqs_queue resource. [GH-8904]
* provider/aws: Add JSON validation to the aws_sqs_queue_policy resource. [GH-8905]
* provider/aws: Add JSON validation to the aws_vpc_endpoint resource. [GH-8906]
* provider/aws: Update aws_cloudformation_stack data source with new helper function. [GH-8907]
* provider/aws: Add JSON validation to the aws_s3_bucket resource. [GH-8908]
* provider/aws: Add support for `cloudwatch_logging_options` to Firehose Delivery Streams [GH-8671]
* provider/aws: Add HTTP/2 support via the http_version parameter to CloudFront distribution [GH-8777]
* provider/aws: Add query_string_cache_keys to allow for selective caching of CloudFront keys [GH-8777]
* provider/aws: Support Import aws_elasticache_cluster [GH-9010]
* provider/aws: Add support for tags to aws_cloudfront_distribution [GH-9011]
* provider/azurerm: Add normalizeJsonString and validateJsonString functions [GH-8909]
* provider/openstack: Use proxy environment variables for communication with services [GH-8948]
* provider/vsphere: Adding 'detach_unknown_disks_on_delete' flag for VM resource [GH-8947]
* provider/aws: No longer require `route_table_ids` list in `aws_vpc_endpoint` resources [GH-9357]
* provider/aws: Allow `description` in `aws_redshift_subnet_group` to be modified [GH-9515]
* provider/aws: Add tagging support to aws_redshift_subnet_group [GH-9504]
* provider/aws: Add validation to IAM User and Group Name [GH-9584]
* provider/aws: Add Ability To Enable / Disable ALB AccessLogs [GH-9290]
* provider/aws: Add support for `AutoMinorVersionUpgrade` to aws_elasticache_replication_group resource. [GH-9657]
* provider/aws: Fix import of RouteTable with destination prefixes [GH-9686]
* provider/aws: Add support for reference_name to aws_route53_health_check [GH-9737]
* provider/aws: Expose ARN suffix on ALB Target Group [GH-9734]
* provider/azurerm: add account_kind and access_tier to storage_account [GH-9408]
* provider/azurerm: write load_balancer attributes to network_interface_card hash [GH-9207]
* provider/azurerm: Add disk_size_gb param to VM storage_os_disk [GH-9200]
* provider/azurerm: support importing of subnet resource [GH-9646]
* provider/digitalocean: Allow resizing DigitalOcean Droplets without increasing disk size. [GH-9573]
* provider/google: enhance service scope list [GH-9442]
* provider/google: Change default MySQL instance version to 5.6 [GH-9674]
* provider/google: Support MySQL 5.7 instances [GH-9673]
* provider/openstack: Added value_specs to openstack_networking_port_v2, allowing vendor information [GH-9551]
* provider/openstack: Added value_specs to openstack_networking_floatingip_v2, allowing vendor information [GH-9552]
* provider/openstack: Added value_specs to openstack_compute_keypair_v2, allowing vendor information [GH-9554]
* provider/openstack: Allow any protocol in openstack_fw_rule_v1 [GH-9617]
* provider/scaleway: update sdk for ams1 region [GH-9687]
* provider/scaleway: server volume property [GH-9695]
BUG FIXES:
* core: Fixed variables not being in scope for destroy -target on modules [GH-9021]
* provider/aws: Remove support for `ah` and `esp` literals in Security Group Ingress/Egress rules; you must use the actual protocol number for protocols other than `tcp`, `udp`, `icmp`, or `all` [GH-8975]
* provider/aws: VPC ID, Port, Protocol and Name change on aws_alb_target_group will ForceNew resource [GH-8989]
* provider/aws: Wait for Spot Fleet to drain before removing from state [GH-8938]
* provider/aws: Fix issue when importing `aws_eip` resources by IP address [GH-8970]
* provider/aws: Ensure that origin_access_identity is a required value within the CloudFront distribution s3_config block [GH-8777]
* provider/aws: Corrected Seoul S3 Website Endpoint format [GH-9032]
* provider/aws: Fix failure removing S3 lifecycle_rule [GH-9031]
* provider/aws: Fix crashing bug in `aws_ami` data source when using `name_regex` [GH-9033]
* provider/aws: Fix reading dimensions on cloudwatch alarms [GH-9029]
* provider/aws: Changing snapshot_identifier on aws_db_instance resource should force… [GH-8806]
* provider/aws: Refresh AWS EIP association from state when not found [GH-9056]
* provider/aws: Make encryption in Aurora instances computed-only [GH-9060]
* provider/aws: Make sure that VPC Peering Connection in a failed state returns an error. [GH-9038]
* provider/aws: guard against aws_route53_record delete panic [GH-9049]
* provider/aws: aws_db_option_group flattenOptions failing due to missing values [GH-9052]
* provider/aws: Add retry logic to the aws_ecr_repository delete func [GH-9050]
* provider/aws: Modifying the parameter_group_name of aws_elasticache_replication_group caused a panic [GH-9101]
* provider/aws: Fix issue with updating ELB subnets for subnets in the same AZ [GH-9131]
* provider/librato: Mandatory name and conditions attributes weren't being sent on Update unless changed [GH-8984]
* core: Resources suffixed with 'panic' won't falsely trigger crash detection. [GH-9395]
* core: Validate lifecycle options don't contain interpolations. [GH-9576]
* core: Tainted resources will not process `ignore_changes`. [GH-7855]
* core: Boolean looking values passed in via `-var` no longer cause type errors. [GH-9642]
* core: Update crypto/ssh to support ED25519 SSH keys [GH-9661]
* core: Computed primitives in certain cases no longer cause diff mismatch errors. [GH-9618]
* core: Empty arrays for list vars in JSON work [GH-8886]
* core: Boolean types in tfvars work properly [GH-9751]
* core: Deposed resource destruction is accounted for properly in `apply` counts. [GH-9731]
* core: Check for graph cycles on resource expansion to catch cycles between self-referenced resources. [GH-9728]
* core: `prevent_destroy` prevents decreasing count [GH-9707]
* core: removed optional items will trigger "requires new" if necessary [GH-9699]
* command/apply: `-backup` and `-state-out` work with plan files [GH-9706]
* command/validate: respond to `--help` [GH-9660]
* provider/archive: Converting to datasource. [GH-8492]
* provider/aws: Fix issue importing AWS Instances and setting the correct `associate_public_ip_address` value [GH-9453]
* provider/aws: Fix issue with updating ElasticBeanstalk environment variables [GH-9259]
* provider/aws: Allow zero value for `scaling_adjustment` in `aws_autoscaling_policy` when using `SimpleScaling` [GH-8893]
* provider/aws: Increase ECS service drain timeout [GH-9521]
* provider/aws: Remove VPC Endpoint from state if it's not found [GH-9561]
* provider/aws: Delete Login Profile from IAM User on force_destroy [GH-9583]
* provider/aws: Exposed aws_api_gw_domain_name.certificate_upload_date attribute [GH-9533]
* provider/aws: fix aws_elasticache_replication_group for Redis in cluster mode [GH-9601]
* provider/aws: Validate regular expression passed via the ami data_source `name_regex` attribute. [GH-9622]
* provider/aws: Bug fix for NoSuchBucket on Destroy of aws_s3_bucket_policy [GH-9641]
* provider/aws: Refresh aws_autoscaling_schedule from state on 404 [GH-9659]
* provider/aws: Allow underscores in IAM user and group names [GH-9684]
* provider/aws: aws_ami: handle deletion of AMIs [GH-9721]
* provider/aws: Fix aws_route53_record alias perpetual diff [GH-9704]
* provider/azurerm: Fix Azure RM loadbalancer rules validation [GH-9468]
* provider/azurerm: Fix servicebus_topic values when using the Update func to stop perpetual diff [GH-9323]
* provider/azurerm: lower servicebus_topic max size to Azure limit [GH-9649]
* provider/azurerm: Fix VHD deletion when VM and Storage account are in separate resource groups [GH-9631]
* provider/azurerm: Guard against panic when importing arm_virtual_network [GH-9739]
* provider/cloudflare: update client library to stop connection closed issues [GH-9715]
* provider/consul: Change to consul_service resource to introduce a `service_id` parameter [GH-9366]
* provider/datadog: Ignore float/int diffs on thresholds [GH-9466]
* provider/docker: Fixes for docker_container host object and documentation [GH-9367]
* provider/scaleway: improve the performance of server deletion [GH-9491]
* provider/scaleway: fix scaleway_volume_attachment with count > 1 [GH-9493]
## 0.7.7 (October 18, 2016)
FEATURES:
* **New Data Source:** `scaleway_bootscript`. ([#9386](https://github.com/hashicorp/terraform/issues/9386))
* **New Data Source:** `scaleway_image`. [GH-9386]
IMPROVEMENTS:
* core: When the environment variable TF_LOG_PATH is specified, debug logs are now appended to the specified file instead of being truncated. ([#9440](https://github.com/hashicorp/terraform/pull/9440))
* provider/aws: Expose ARN for `aws_lambda_alias`. ([#9390](https://github.com/hashicorp/terraform/issues/9390))
* provider/aws: Add support for AWS US East (Ohio) region. ([#9414](https://github.com/hashicorp/terraform/issues/9414))
* provider/scaleway: `scaleway_ip`, `scaleway_security_group`, `scaleway_server` and `scaleway_volume` resources can now be imported. ([#9387](https://github.com/hashicorp/terraform/issues/9387))
BUG FIXES:
* core: List and map indexes support arithmetic. ([#9372](https://github.com/hashicorp/terraform/issues/9372))
* core: List and map indexes are implicitly converted to the correct type if possible. ([#9372](https://github.com/hashicorp/terraform/issues/9372))
* provider/aws: Read back `associate_public_ip_address` in `aws_launch_configuration` resources to enable importing. ([#9399](https://github.com/hashicorp/terraform/issues/9399))
* provider/aws: Remove `aws_route` resources from state if their associated `aws_route_table` has been removed. ([#9431](https://github.com/hashicorp/terraform/issues/9431))
* provider/azurerm: Load balancer resources now have their `id` attribute set to the resource URI instead of the load balancer URI. ([#9401](https://github.com/hashicorp/terraform/issues/9401))
* provider/google: Fix a bug causing a crash when migrating `google_compute_target_pool` resources from 0.6.x releases. ([#9370](https://github.com/hashicorp/terraform/issues/9370))
## 0.7.6 (October 14, 2016)
BACKWARDS INCOMPATIBILITIES / NOTES:
* `azurerm_virtual_machine` has deprecated the use of `diagnostics_profile` in favour of `boot_diagnostics`. ([#9122](https://github.com/hashicorp/terraform/issues/9122))
* The deprecated `key_file` and `bastion_key_file` arguments to Provisioner Connections have been removed ([#9340](https://github.com/hashicorp/terraform/issues/9340))
FEATURES:
* **New Data Source:** `aws_billing_service_account` ([#8701](https://github.com/hashicorp/terraform/issues/8701))
* **New Data Source:** `aws_availability_zone` ([#6819](https://github.com/hashicorp/terraform/issues/6819))
* **New Data Source:** `aws_region` ([#6819](https://github.com/hashicorp/terraform/issues/6819))
* **New Data Source:** `aws_subnet` ([#6819](https://github.com/hashicorp/terraform/issues/6819))
* **New Data Source:** `aws_vpc` ([#6819](https://github.com/hashicorp/terraform/issues/6819))
* **New Resource:** `azurerm_lb` ([#9199](https://github.com/hashicorp/terraform/issues/9199))
* **New Resource:** `azurerm_lb_backend_address_pool` ([#9199](https://github.com/hashicorp/terraform/issues/9199))
* **New Resource:** `azurerm_lb_nat_rule` ([#9199](https://github.com/hashicorp/terraform/issues/9199))
* **New Resource:** `azurerm_lb_nat_pool` ([#9199](https://github.com/hashicorp/terraform/issues/9199))
* **New Resource:** `azurerm_lb_probe` ([#9199](https://github.com/hashicorp/terraform/issues/9199))
* **New Resource:** `azurerm_lb_rule` ([#9199](https://github.com/hashicorp/terraform/issues/9199))
* **New Resource:** `github_repository` ([#9327](https://github.com/hashicorp/terraform/issues/9327))
IMPROVEMENTS:
* core-validation: create validation package to provide common validation functions ([#8103](https://github.com/hashicorp/terraform/issues/8103))
* provider/aws: Support Import of OpsWorks Custom Layers ([#9252](https://github.com/hashicorp/terraform/issues/9252))
* provider/aws: Automatically constructed ARNs now support partitions other than `aws`, allowing operation with `aws-cn` and `aws-us-gov` ([#9273](https://github.com/hashicorp/terraform/issues/9273))
* provider/aws: Retry setTags operation for EC2 resources ([#7890](https://github.com/hashicorp/terraform/issues/7890))
* provider/aws: Support refresh of EC2 instance `user_data` ([#6736](https://github.com/hashicorp/terraform/issues/6736))
* provider/aws: Poll to confirm delete of `resource_aws_customer_gateway` ([#9346](https://github.com/hashicorp/terraform/issues/9346))
* provider/azurerm: expose default keys for `servicebus_namespace` ([#9242](https://github.com/hashicorp/terraform/issues/9242))
* provider/azurerm: add `enable_blob_encryption` to `azurerm_storage_account` resource ([#9233](https://github.com/hashicorp/terraform/issues/9233))
* provider/azurerm: set `resource_group_name` on resource import across the provider ([#9073](https://github.com/hashicorp/terraform/issues/9073))
* provider/azurerm: `azurerm_cdn_profile` resources can now be imported ([#9306](https://github.com/hashicorp/terraform/issues/9306))
* provider/datadog: add support for Datadog dashboard "type" and "style" options ([#9228](https://github.com/hashicorp/terraform/issues/9228))
* provider/scaleway: `region` is now supported for provider configuration
BUG FIXES:
* core: Local state can now be refreshed when no resources exist ([#7320](https://github.com/hashicorp/terraform/issues/7320))
* core: Orphaned nested (depth 2+) modules will inherit provider configs ([#9318](https://github.com/hashicorp/terraform/issues/9318))
* core: Fix crash when a map key contains an interpolation function ([#9282](https://github.com/hashicorp/terraform/issues/9282))
* core: Numeric variable values were incorrectly converted to numbers ([#9263](https://github.com/hashicorp/terraform/issues/9263))
* core: Fix input and output of map variables from HCL ([#9268](https://github.com/hashicorp/terraform/issues/9268))
* core: Crash when interpolating a map value with a function in the key ([#9282](https://github.com/hashicorp/terraform/issues/9282))
* core: Crash when copying a nil value in an InstanceState ([#9356](https://github.com/hashicorp/terraform/issues/9356))
* command/fmt: Bare comment groups no longer have superfluous newlines
* command/fmt: Leading comments on list items are formatted properly
* provider/aws: Return correct AMI image when `most_recent` is set to `true`. ([#9277](https://github.com/hashicorp/terraform/issues/9277))
* provider/aws: Fix issue with diff on import of `aws_eip` in EC2 Classic ([#9009](https://github.com/hashicorp/terraform/issues/9009))
* provider/aws: Handle EC2 tags related errors in CloudFront Distribution resource. ([#9298](https://github.com/hashicorp/terraform/issues/9298))
* provider/aws: Fix error caused when using `etag` and `kms_key_id` with `aws_s3_bucket_object` ([#9168](https://github.com/hashicorp/terraform/issues/9168))
* provider/aws: Fix issue reassigning EIP instances appropriately ([#7686](https://github.com/hashicorp/terraform/issues/7686))
* provider/azurerm: removing resources from state when the API returns a 404 for them ([#8859](https://github.com/hashicorp/terraform/issues/8859))
* provider/azurerm: Fixed a panic in `azurerm_virtual_machine` when using `diagnostic_profile` ([#9122](https://github.com/hashicorp/terraform/issues/9122))
## 0.7.5 (October 6, 2016)
BACKWARDS INCOMPATIBILITIES / NOTES:
* `tls_cert_request` is now a managed resource instead of a data source, restoring the pre-Terraform 0.7 behaviour ([#9035](https://github.com/hashicorp/terraform/issues/9035))
FEATURES:
* **New Provider:** `bitbucket` ([#7405](https://github.com/hashicorp/terraform/issues/7405))
* **New Resource:** `aws_api_gateway_client_certificate` ([#8775](https://github.com/hashicorp/terraform/issues/8775))
* **New Resource:** `azurerm_servicebus_topic` ([#9151](https://github.com/hashicorp/terraform/issues/9151))
* **New Resource:** `azurerm_servicebus_subscription` ([#9185](https://github.com/hashicorp/terraform/issues/9185))
* **New Resource:** `aws_emr_cluster` ([#9106](https://github.com/hashicorp/terraform/issues/9106))
* **New Resource:** `aws_emr_instance_group` ([#9106](https://github.com/hashicorp/terraform/issues/9106))
IMPROVEMENTS:
* helper/schema: Adding of MinItems as a validation to Lists and Maps ([#9216](https://github.com/hashicorp/terraform/issues/9216))
* provider/aws: Add JSON validation to the `aws_cloudwatch_event_rule` resource ([#8897](https://github.com/hashicorp/terraform/issues/8897))
* provider/aws: S3 bucket policies are imported as separate resources ([#8915](https://github.com/hashicorp/terraform/issues/8915))
* provider/aws: S3 bucket policies can now be removed via the `aws_s3_bucket` resource ([#8915](https://github.com/hashicorp/terraform/issues/8915))
* provider/aws: Added a `cluster_address` attribute to aws elasticache ([#8935](https://github.com/hashicorp/terraform/issues/8935))
* provider/aws: Add JSON validation to the `aws_elasticsearch_domain` resource. ([#8898](https://github.com/hashicorp/terraform/issues/8898))
* provider/aws: Add JSON validation to the `aws_kms_key` resource. ([#8900](https://github.com/hashicorp/terraform/issues/8900))
* provider/aws: Add JSON validation to the `aws_s3_bucket_policy` resource. ([#8901](https://github.com/hashicorp/terraform/issues/8901))
* provider/aws: Add JSON validation to the `aws_sns_topic` resource. ([#8902](https://github.com/hashicorp/terraform/issues/8902))
* provider/aws: Add JSON validation to the `aws_sns_topic_policy` resource. ([#8903](https://github.com/hashicorp/terraform/issues/8903))
* provider/aws: Add JSON validation to the `aws_sqs_queue` resource. ([#8904](https://github.com/hashicorp/terraform/issues/8904))
* provider/aws: Add JSON validation to the `aws_sqs_queue_policy` resource. ([#8905](https://github.com/hashicorp/terraform/issues/8905))
* provider/aws: Add JSON validation to the `aws_vpc_endpoint` resource. ([#8906](https://github.com/hashicorp/terraform/issues/8906))
* provider/aws: Update `aws_cloudformation_stack` data source with new helper function. ([#8907](https://github.com/hashicorp/terraform/issues/8907))
* provider/aws: Add JSON validation to the `aws_s3_bucket` resource. ([#8908](https://github.com/hashicorp/terraform/issues/8908))
* provider/aws: Add support for `cloudwatch_logging_options` to Firehose Delivery Streams ([#8671](https://github.com/hashicorp/terraform/issues/8671))
* provider/aws: Add HTTP/2 support via the http_version parameter to CloudFront distribution ([#8777](https://github.com/hashicorp/terraform/issues/8777))
* provider/aws: Add `query_string_cache_keys` to allow for selective caching of CloudFront keys ([#8777](https://github.com/hashicorp/terraform/issues/8777))
* provider/aws: Support Import `aws_elasticache_cluster` ([#9010](https://github.com/hashicorp/terraform/issues/9010))
* provider/aws: Add support for tags to `aws_cloudfront_distribution` ([#9011](https://github.com/hashicorp/terraform/issues/9011))
* provider/aws: Support Import `aws_opsworks_stack` ([#9124](https://github.com/hashicorp/terraform/issues/9124))
* provider/aws: Support Import `aws_elasticache_replication_groups` ([#9140](https://github.com/hashicorp/terraform/issues/9140))
* provider/aws: Add new aws api-gateway integration types ([#9213](https://github.com/hashicorp/terraform/issues/9213))
* provider/aws: Import `aws_db_event_subscription` ([#9220](https://github.com/hashicorp/terraform/issues/9220))
* provider/azurerm: Add normalizeJsonString and validateJsonString functions ([#8909](https://github.com/hashicorp/terraform/issues/8909))
* provider/azurerm: Support AzureRM Sql Database DataWarehouse ([#9196](https://github.com/hashicorp/terraform/issues/9196))
* provider/openstack: Use proxy environment variables for communication with services ([#8948](https://github.com/hashicorp/terraform/issues/8948))
* provider/vsphere: Adding `detach_unknown_disks_on_delete` flag for VM resource ([#8947](https://github.com/hashicorp/terraform/issues/8947))
* provisioner/chef: Add `skip_register` attribute to allow skipping the registering steps ([#9127](https://github.com/hashicorp/terraform/issues/9127))
BUG FIXES:
* core: Fixed variables not being in scope for destroy -target on modules ([#9021](https://github.com/hashicorp/terraform/issues/9021))
* core: Fixed issue that prevented diffs from being properly generated in a specific resource schema scenario ([#8891](https://github.com/hashicorp/terraform/issues/8891))
* provider/aws: Remove support for `ah` and `esp` literals in Security Group Ingress/Egress rules; you must use the actual protocol number for protocols other than `tcp`, `udp`, `icmp`, or `all` ([#8975](https://github.com/hashicorp/terraform/issues/8975))
* provider/aws: Do not report drift for effect values differing only by case in AWS policies ([#9139](https://github.com/hashicorp/terraform/issues/9139))
* provider/aws: VPC ID, Port, Protocol and Name change on aws_alb_target_group will ForceNew resource ([#8989](https://github.com/hashicorp/terraform/issues/8989))
* provider/aws: Wait for Spot Fleet to drain before removing from state ([#8938](https://github.com/hashicorp/terraform/issues/8938))
* provider/aws: Fix issue when importing `aws_eip` resources by IP address ([#8970](https://github.com/hashicorp/terraform/issues/8970))
* provider/aws: Ensure that origin_access_identity is a required value within the CloudFront distribution s3_config block ([#8777](https://github.com/hashicorp/terraform/issues/8777))
* provider/aws: Corrected Seoul S3 Website Endpoint format ([#9032](https://github.com/hashicorp/terraform/issues/9032))
* provider/aws: Fix failed remove S3 lifecycle_rule ([#9031](https://github.com/hashicorp/terraform/issues/9031))
* provider/aws: Fix crashing bug in `aws_ami` data source when using `name_regex` ([#9033](https://github.com/hashicorp/terraform/issues/9033))
* provider/aws: Fix reading dimensions on cloudwatch alarms ([#9029](https://github.com/hashicorp/terraform/issues/9029))
* provider/aws: Changing snapshot_identifier on aws_db_instance resource should force… ([#8806](https://github.com/hashicorp/terraform/issues/8806))
* provider/aws: Refresh AWS EIP association from state when not found ([#9056](https://github.com/hashicorp/terraform/issues/9056))
* provider/aws: Make encryption in Aurora instances computed-only ([#9060](https://github.com/hashicorp/terraform/issues/9060))
* provider/aws: Make sure that VPC Peering Connection in a failed state returns an error. ([#9038](https://github.com/hashicorp/terraform/issues/9038))
* provider/aws: guard against aws_route53_record delete panic ([#9049](https://github.com/hashicorp/terraform/issues/9049))
* provider/aws: aws_db_option_group flattenOptions failing due to missing values ([#9052](https://github.com/hashicorp/terraform/issues/9052))
* provider/aws: Add retry logic to the aws_ecr_repository delete func ([#9050](https://github.com/hashicorp/terraform/issues/9050))
* provider/aws: Modifying the parameter_group_name of aws_elasticache_replication_group caused a panic ([#9101](https://github.com/hashicorp/terraform/issues/9101))
* provider/aws: Fix issue with updating ELB subnets for subnets in the same AZ ([#9131](https://github.com/hashicorp/terraform/issues/9131))
* provider/aws: aws_route53_record alias refresh manually updated record ([#9125](https://github.com/hashicorp/terraform/issues/9125))
* provider/aws: Fix issue detaching volumes that were already detached ([#9023](https://github.com/hashicorp/terraform/issues/9023))
* provider/aws: Add retry to the `aws_ssm_document` delete func ([#9188](https://github.com/hashicorp/terraform/issues/9188))
* provider/aws: Fix issue updating `search_string` in aws_cloudwatch_metric_alarm ([#9230](https://github.com/hashicorp/terraform/issues/9230))
* provider/aws: Update EFS resource to read performance mode and creation_token ([#9234](https://github.com/hashicorp/terraform/issues/9234))
* provider/azurerm: fix resource ID parsing for subscriptions resources ([#9163](https://github.com/hashicorp/terraform/issues/9163))
* provider/librato: Mandatory name and conditions attributes weren't being sent on Update unless changed ([#8984](https://github.com/hashicorp/terraform/issues/8984))
* provisioner/chef: Fix an error with parsing certain `vault_json` content ([#9114](https://github.com/hashicorp/terraform/issues/9114))
* provisioner/chef: Change to order in which to cleanup the user key so this is done before the Chef run starts ([#9114](https://github.com/hashicorp/terraform/issues/9114))
## 0.7.4 (September 19, 2016)
@ -116,7 +329,7 @@ IMPROVEMENTS:
* core: Show last resource state in a timeout error message ([#8510](https://github.com/hashicorp/terraform/issues/8510))
* helper/schema: Add diff suppression callback ([#8585](https://github.com/hashicorp/terraform/issues/8585))
* provider/aws: API Gateway Custom Authorizer ([#8535](https://github.com/hashicorp/terraform/issues/8535))
* provider/aws: Add MemoryReservation To `aws_ecs_container_definition` data source ([#8437](https://github.com/hashicorp/terraform/issues/8437))
* provider/aws: Add ability to Enable/Disable ELB Access Logs ([#8438](https://github.com/hashicorp/terraform/issues/8438))
* provider/aws: Add support for assuming a role prior to performing API operations ([#8638](https://github.com/hashicorp/terraform/issues/8638))
* provider/aws: Export `arn` of `aws_autoscaling_group` ([#8503](https://github.com/hashicorp/terraform/issues/8503))
@ -126,14 +339,14 @@ IMPROVEMENTS:
* provider/aws: Support for lifecycle hooks at ASG creation ([#5620](https://github.com/hashicorp/terraform/issues/5620))
* provider/consul: Make provider settings truly optional ([#8551](https://github.com/hashicorp/terraform/issues/8551))
* provider/statuscake: Add support for contact-group id in statuscake test ([#8417](https://github.com/hashicorp/terraform/issues/8417))
BUG FIXES:
* core: Changing a module source from file to VCS no longer errors ([#8398](https://github.com/hashicorp/terraform/issues/8398))
* core: Configuration is now validated prior to input, fixing an obscure parse error when attempting to interpolate a count ([#8591](https://github.com/hashicorp/terraform/issues/8591))
* core: JSON configuration with resources with a single key parse properly ([#8485](https://github.com/hashicorp/terraform/issues/8485))
* core: States with duplicate modules are detected and an error is shown ([#8463](https://github.com/hashicorp/terraform/issues/8463))
* core: Validate uniqueness of variables/outputs in a module ([#8482](https://github.com/hashicorp/terraform/issues/8482))
* core: `-var` flag inputs starting with `/` work
* core: `-var` flag inputs starting with a number now work; the fix also makes handling of unusual `-var` inputs more resilient overall ([#8044](https://github.com/hashicorp/terraform/issues/8044))
* provider/aws: Add AWS error message to retry APIGateway account update ([#8533](https://github.com/hashicorp/terraform/issues/8533))
* provider/aws: Do not set empty string to state for `aws_vpn_gateway` availability zone ([#8645](https://github.com/hashicorp/terraform/issues/8645))


@ -2,7 +2,7 @@ Terraform
=========
- Website: http://www.terraform.io
- IRC: `#terraform-tool` on Freenode
- [![Gitter chat](https://badges.gitter.im/hashicorp-terraform/Lobby.png)](https://gitter.im/hashicorp-terraform/Lobby)
- Mailing list: [Google Groups](http://groups.google.com/group/terraform-tool)
![Terraform](https://raw.githubusercontent.com/hashicorp/terraform/master/website/source/assets/images/readme.png)
@ -29,7 +29,7 @@ All documentation is available on the [Terraform website](http://www.terraform.i
Developing Terraform
--------------------
If you wish to work on Terraform itself or any of its built-in providers, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.6+ is *required*). Alternatively, you can use the Vagrantfile in the root of this repo to stand up a virtual machine with the appropriate dev tooling already set up for you.
If you wish to work on Terraform itself or any of its built-in providers, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.7+ is *required*). Alternatively, you can use the Vagrantfile in the root of this repo to stand up a virtual machine with the appropriate dev tooling already set up for you.
For local dev first make sure Go is properly installed, including setting up a [GOPATH](http://golang.org/doc/code.html#GOPATH). You will also need to add `$GOPATH/bin` to your `$PATH`.

Vagrantfile

@ -5,54 +5,65 @@
VAGRANTFILE_API_VERSION = "2"
$script = <<SCRIPT
GOVERSION="1.7"
GOVERSION="1.7.3"
SRCROOT="/opt/go"
SRCPATH="/opt/gopath"
# Get the ARCH
ARCH=`uname -m | sed 's|i686|386|' | sed 's|x86_64|amd64|'`
ARCH="$(uname -m | sed 's|i686|386|' | sed 's|x86_64|amd64|')"
# Install Prereq Packages
sudo apt-get update
sudo apt-get upgrade -y
sudo apt-get install -y build-essential curl git-core libpcre3-dev mercurial pkg-config zip
export DEBIAN_PRIORITY=critical
export DEBIAN_FRONTEND=noninteractive
export DEBCONF_NONINTERACTIVE_SEEN=true
APT_OPTS="--yes --force-yes --no-install-suggests --no-install-recommends"
echo "Upgrading packages ..."
apt-get update ${APT_OPTS} >/dev/null
apt-get upgrade ${APT_OPTS} >/dev/null
echo "Installing prerequisites ..."
apt-get install ${APT_OPTS} build-essential curl git-core libpcre3-dev mercurial pkg-config zip >/dev/null
# Install Go
cd /tmp
wget --quiet https://storage.googleapis.com/golang/go${GOVERSION}.linux-${ARCH}.tar.gz
tar -xvf go${GOVERSION}.linux-${ARCH}.tar.gz
sudo mv go $SRCROOT
sudo chmod 775 $SRCROOT
sudo chown vagrant:vagrant $SRCROOT
echo "Downloading go (${GOVERSION}) ..."
wget -P /tmp --quiet "https://storage.googleapis.com/golang/go${GOVERSION}.linux-${ARCH}.tar.gz"
echo "Setting up go (${GOVERSION}) ..."
tar -C /opt -xf "/tmp/go${GOVERSION}.linux-${ARCH}.tar.gz"
chmod 775 "$SRCROOT"
chown vagrant:vagrant "$SRCROOT"
# Setup the GOPATH; even though the shared folder spec gives the working
# directory the right user/group, we need to set it properly on the
# parent path to allow subsequent "go get" commands to work.
sudo mkdir -p $SRCPATH
sudo chown -R vagrant:vagrant $SRCPATH 2>/dev/null || true
mkdir -p "$SRCPATH"
chown -R vagrant:vagrant "$SRCPATH" 2>/dev/null || true
# ^^ silencing errors here because we expect this to fail for the shared folder
cat <<EOF >/tmp/gopath.sh
install -m0755 /dev/stdin /etc/profile.d/gopath.sh <<EOF
export GOPATH="$SRCPATH"
export GOROOT="$SRCROOT"
export PATH="$SRCROOT/bin:$SRCPATH/bin:\$PATH"
EOF
cat <<EOF >>~/.bashrc
cat >>/home/vagrant/.bashrc <<EOF
## After login, change to terraform directory
cd /opt/gopath/src/github.com/hashicorp/terraform
EOF
sudo mv /tmp/gopath.sh /etc/profile.d/gopath.sh
sudo chmod 0755 /etc/profile.d/gopath.sh
SCRIPT
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.box = "bento/ubuntu-14.04"
config.vm.hostname = "terraform"
config.vm.provision "shell", inline: $script, privileged: false
config.vm.provision "prepare-shell", type: "shell", inline: "sudo sed -i '/tty/!s/mesg n/tty -s \\&\\& mesg n/' /root/.profile", privileged: false
config.vm.provision "initial-setup", type: "shell", inline: $script
config.vm.synced_folder '.', '/opt/gopath/src/github.com/hashicorp/terraform'
config.vm.provider "docker" do |v, override|
override.vm.box = "tknerr/baseimage-ubuntu-14.04"
end
["vmware_fusion", "vmware_workstation"].each do |p|
config.vm.provider p do |v|
v.vmx["memsize"] = "4096"


@ -0,0 +1,12 @@
package main
import (
"github.com/hashicorp/terraform/builtin/providers/pagerduty"
"github.com/hashicorp/terraform/plugin"
)
func main() {
plugin.Serve(&plugin.ServeOpts{
ProviderFunc: pagerduty.Provider,
})
}


@ -2,6 +2,8 @@ package archive
import (
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"fmt"
"io/ioutil"
@ -11,13 +13,9 @@ import (
"github.com/hashicorp/terraform/helper/schema"
)
func resourceArchiveFile() *schema.Resource {
func dataSourceFile() *schema.Resource {
return &schema.Resource{
Create: resourceArchiveFileCreate,
Read: resourceArchiveFileRead,
Update: resourceArchiveFileUpdate,
Delete: resourceArchiveFileDelete,
Exists: resourceArchiveFileExists,
Read: dataSourceFileRead,
Schema: map[string]*schema.Schema{
"type": &schema.Schema{
@ -64,50 +62,56 @@ func resourceArchiveFile() *schema.Resource {
ForceNew: true,
Description: "SHA1 checksum of output file",
},
"output_base64sha256": &schema.Schema{
Type: schema.TypeString,
Computed: true,
ForceNew: true,
Description: "Base64 Encoded SHA256 checksum of output file",
},
},
}
}
func resourceArchiveFileCreate(d *schema.ResourceData, meta interface{}) error {
if err := resourceArchiveFileUpdate(d, meta); err != nil {
func dataSourceFileRead(d *schema.ResourceData, meta interface{}) error {
outputPath := d.Get("output_path").(string)
outputDirectory := path.Dir(outputPath)
if outputDirectory != "" {
if _, err := os.Stat(outputDirectory); err != nil {
if err := os.MkdirAll(outputDirectory, 0755); err != nil {
return err
}
}
}
if err := archive(d); err != nil {
return err
}
return resourceArchiveFileRead(d, meta)
}
func resourceArchiveFileRead(d *schema.ResourceData, meta interface{}) error {
outputPath := d.Get("output_path").(string)
// Generate archived file stats
fi, err := os.Stat(outputPath)
if os.IsNotExist(err) {
d.SetId("")
d.MarkNewResource()
return nil
if err != nil {
return err
}
sha, err := genFileSha1(outputPath)
sha1, base64sha256, err := genFileShas(outputPath)
if err != nil {
return fmt.Errorf("could not generate file checksum sha: %s", err)
return fmt.Errorf("could not generate file checksum sha256: %s", err)
}
d.Set("output_sha", sha)
d.Set("output_sha", sha1)
d.Set("output_base64sha256", base64sha256)
d.Set("output_size", fi.Size())
d.SetId(d.Get("output_sha").(string))
return nil
}
func resourceArchiveFileUpdate(d *schema.ResourceData, meta interface{}) error {
func archive(d *schema.ResourceData) error {
archiveType := d.Get("type").(string)
outputPath := d.Get("output_path").(string)
outputDirectory := path.Dir(outputPath)
if outputDirectory != "" {
if _, err := os.Stat(outputDirectory); err != nil {
if err := os.MkdirAll(outputDirectory, 0777); err != nil {
return err
}
}
}
archiver := getArchiver(archiveType, outputPath)
if archiver == nil {
return fmt.Errorf("archive type not supported: %s", archiveType)
@ -129,55 +133,22 @@ func resourceArchiveFileUpdate(d *schema.ResourceData, meta interface{}) error {
} else {
return fmt.Errorf("one of 'source_dir', 'source_file', 'source_content_filename' must be specified")
}
// Generate archived file stats
fi, err := os.Stat(outputPath)
if err != nil {
return err
}
sha, err := genFileSha1(outputPath)
if err != nil {
return fmt.Errorf("could not generate file checksum sha: %s", err)
}
d.Set("output_sha", sha)
d.Set("output_size", fi.Size())
d.SetId(d.Get("output_sha").(string))
return nil
}
func resourceArchiveFileDelete(d *schema.ResourceData, meta interface{}) error {
outputPath := d.Get("output_path").(string)
if _, err := os.Stat(outputPath); os.IsNotExist(err) {
return nil
}
if err := os.Remove(outputPath); err != nil {
return fmt.Errorf("could not delete zip file %q: %s", outputPath, err)
}
return nil
}
func resourceArchiveFileExists(d *schema.ResourceData, meta interface{}) (bool, error) {
outputPath := d.Get("output_path").(string)
_, err := os.Stat(outputPath)
if os.IsNotExist(err) {
return false, nil
}
if err != nil {
return false, err
}
return true, nil
}
func genFileSha1(filename string) (string, error) {
func genFileShas(filename string) (string, string, error) {
data, err := ioutil.ReadFile(filename)
if err != nil {
return "", fmt.Errorf("could not compute file '%s' checksum: %s", filename, err)
return "", "", fmt.Errorf("could not compute file '%s' checksum: %s", filename, err)
}
h := sha1.New()
h.Write([]byte(data))
return hex.EncodeToString(h.Sum(nil)), nil
sha1 := hex.EncodeToString(h.Sum(nil))
h256 := sha256.New()
h256.Write([]byte(data))
shaSum := h256.Sum(nil)
sha256base64 := base64.StdEncoding.EncodeToString(shaSum[:])
return sha1, sha256base64, nil
}
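For reference, the two checksums the reworked data source exports (`output_sha` as hex-encoded SHA1 and `output_base64sha256` as base64-encoded SHA256) can be reproduced with a small standalone sketch like the one below; the file path used here is only a placeholder.

package main

import (
	"crypto/sha1"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"io/ioutil"
	"log"
)

func main() {
	// "example.zip" is a placeholder path; any generated archive works here.
	data, err := ioutil.ReadFile("example.zip")
	if err != nil {
		log.Fatal(err)
	}

	// output_sha: hex-encoded SHA1 of the archive contents.
	s1 := sha1.Sum(data)
	fmt.Println("output_sha:", hex.EncodeToString(s1[:]))

	// output_base64sha256: base64-encoded SHA256 of the archive contents.
	s256 := sha256.Sum256(data)
	fmt.Println("output_base64sha256:", base64.StdEncoding.EncodeToString(s256[:]))
}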


@ -13,29 +13,26 @@ func TestAccArchiveFile_Basic(t *testing.T) {
var fileSize string
r.Test(t, r.TestCase{
Providers: testProviders,
CheckDestroy: r.ComposeTestCheckFunc(
testAccArchiveFileMissing("zip_file_acc_test.zip"),
),
Steps: []r.TestStep{
r.TestStep{
Config: testAccArchiveFileContentConfig,
Check: r.ComposeTestCheckFunc(
testAccArchiveFileExists("zip_file_acc_test.zip", &fileSize),
r.TestCheckResourceAttrPtr("archive_file.foo", "output_size", &fileSize),
r.TestCheckResourceAttrPtr("data.archive_file.foo", "output_size", &fileSize),
),
},
r.TestStep{
Config: testAccArchiveFileFileConfig,
Check: r.ComposeTestCheckFunc(
testAccArchiveFileExists("zip_file_acc_test.zip", &fileSize),
r.TestCheckResourceAttrPtr("archive_file.foo", "output_size", &fileSize),
r.TestCheckResourceAttrPtr("data.archive_file.foo", "output_size", &fileSize),
),
},
r.TestStep{
Config: testAccArchiveFileDirConfig,
Check: r.ComposeTestCheckFunc(
testAccArchiveFileExists("zip_file_acc_test.zip", &fileSize),
r.TestCheckResourceAttrPtr("archive_file.foo", "output_size", &fileSize),
r.TestCheckResourceAttrPtr("data.archive_file.foo", "output_size", &fileSize),
),
},
r.TestStep{
@ -60,21 +57,8 @@ func testAccArchiveFileExists(filename string, fileSize *string) r.TestCheckFunc
}
}
func testAccArchiveFileMissing(filename string) r.TestCheckFunc {
return func(s *terraform.State) error {
_, err := os.Stat(filename)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
return fmt.Errorf("found file expected to be deleted: %s", filename)
}
}
var testAccArchiveFileContentConfig = `
resource "archive_file" "foo" {
data "archive_file" "foo" {
type = "zip"
source_content = "This is some content"
source_content_filename = "content.txt"
@ -84,7 +68,7 @@ resource "archive_file" "foo" {
var tmpDir = os.TempDir() + "/test"
var testAccArchiveFileOutputPath = fmt.Sprintf(`
resource "archive_file" "foo" {
data "archive_file" "foo" {
type = "zip"
source_content = "This is some content"
source_content_filename = "content.txt"
@ -93,7 +77,7 @@ resource "archive_file" "foo" {
`, tmpDir)
var testAccArchiveFileFileConfig = `
resource "archive_file" "foo" {
data "archive_file" "foo" {
type = "zip"
source_file = "test-fixtures/test-file.txt"
output_path = "zip_file_acc_test.zip"
@ -101,7 +85,7 @@ resource "archive_file" "foo" {
`
var testAccArchiveFileDirConfig = `
resource "archive_file" "foo" {
data "archive_file" "foo" {
type = "zip"
source_dir = "test-fixtures/test-dir"
output_path = "zip_file_acc_test.zip"


@ -7,10 +7,14 @@ import (
func Provider() terraform.ResourceProvider {
return &schema.Provider{
Schema: map[string]*schema.Schema{},
DataSourcesMap: map[string]*schema.Resource{
"archive_file": dataSourceFile(),
},
ResourcesMap: map[string]*schema.Resource{
"archive_file": resourceArchiveFile(),
"archive_file": schema.DataSourceResourceShim(
"archive_file",
dataSourceFile(),
),
},
}
}


@ -21,7 +21,7 @@ import (
"github.com/hashicorp/go-cleanhttp"
)
func GetAccountId(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string) (string, error) {
func GetAccountInfo(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string) (string, string, error) {
// If we have creds from instance profile, we can use metadata API
if authProviderName == ec2rolecreds.ProviderName {
log.Println("[DEBUG] Trying to get account ID via AWS Metadata API")
@ -30,7 +30,7 @@ func GetAccountId(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string) (
setOptionalEndpoint(cfg)
sess, err := session.NewSession(cfg)
if err != nil {
return "", errwrap.Wrapf("Error creating AWS session: %s", err)
return "", "", errwrap.Wrapf("Error creating AWS session: %s", err)
}
metadataClient := ec2metadata.New(sess)
@ -38,24 +38,24 @@ func GetAccountId(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string) (
if err != nil {
// This can be triggered when no IAM Role is assigned
// or AWS just happens to return invalid response
return "", fmt.Errorf("Failed getting EC2 IAM info: %s", err)
return "", "", fmt.Errorf("Failed getting EC2 IAM info: %s", err)
}
return parseAccountIdFromArn(info.InstanceProfileArn)
return parseAccountInfoFromArn(info.InstanceProfileArn)
}
// Then try IAM GetUser
log.Println("[DEBUG] Trying to get account ID via iam:GetUser")
outUser, err := iamconn.GetUser(nil)
if err == nil {
return parseAccountIdFromArn(*outUser.User.Arn)
return parseAccountInfoFromArn(*outUser.User.Arn)
}
awsErr, ok := err.(awserr.Error)
// AccessDenied and ValidationError can be raised
// if credentials belong to federated profile, so we ignore these
if !ok || (awsErr.Code() != "AccessDenied" && awsErr.Code() != "ValidationError") {
return "", fmt.Errorf("Failed getting account ID via 'iam:GetUser': %s", err)
return "", "", fmt.Errorf("Failed getting account ID via 'iam:GetUser': %s", err)
}
log.Printf("[DEBUG] Getting account ID via iam:GetUser failed: %s", err)
@ -63,7 +63,7 @@ func GetAccountId(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string) (
log.Println("[DEBUG] Trying to get account ID via sts:GetCallerIdentity")
outCallerIdentity, err := stsconn.GetCallerIdentity(&sts.GetCallerIdentityInput{})
if err == nil {
return *outCallerIdentity.Account, nil
return parseAccountInfoFromArn(*outCallerIdentity.Arn)
}
log.Printf("[DEBUG] Getting account ID via sts:GetCallerIdentity failed: %s", err)
@ -73,22 +73,22 @@ func GetAccountId(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string) (
MaxItems: aws.Int64(int64(1)),
})
if err != nil {
return "", fmt.Errorf("Failed getting account ID via 'iam:ListRoles': %s", err)
return "", "", fmt.Errorf("Failed getting account ID via 'iam:ListRoles': %s", err)
}
if len(outRoles.Roles) < 1 {
return "", errors.New("Failed getting account ID via 'iam:ListRoles': No roles available")
return "", "", errors.New("Failed getting account ID via 'iam:ListRoles': No roles available")
}
return parseAccountIdFromArn(*outRoles.Roles[0].Arn)
return parseAccountInfoFromArn(*outRoles.Roles[0].Arn)
}
func parseAccountIdFromArn(arn string) (string, error) {
func parseAccountInfoFromArn(arn string) (string, string, error) {
parts := strings.Split(arn, ":")
if len(parts) < 5 {
return "", fmt.Errorf("Unable to parse ID from invalid ARN: %q", arn)
return "", "", fmt.Errorf("Unable to parse ID from invalid ARN: %q", arn)
}
return parts[4], nil
return parts[1], parts[4], nil
}
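To make the split above concrete, an ARN has the shape arn:<partition>:<service>:<region>:<account-id>:<resource>, so the partition is the second colon-separated field and the account ID is the fifth. A minimal standalone sketch mirroring that parsing, using a made-up ARN:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Example ARN (made up): arn:<partition>:<service>:<region>:<account-id>:<resource>
	arn := "arn:aws:iam::123456789012:instance-profile/example-role"

	parts := strings.Split(arn, ":")
	if len(parts) < 5 {
		fmt.Println("invalid ARN:", arn)
		return
	}

	// parts[1] is the partition ("aws", "aws-cn", "aws-us-gov"),
	// parts[4] is the account ID.
	fmt.Println("partition:", parts[1]) // aws
	fmt.Println("account:  ", parts[4]) // 123456789012
}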
// This function is responsible for reading credentials from the


@ -21,7 +21,7 @@ import (
"github.com/aws/aws-sdk-go/service/sts"
)
func TestAWSGetAccountId_shouldBeValid_fromEC2Role(t *testing.T) {
func TestAWSGetAccountInfo_shouldBeValid_fromEC2Role(t *testing.T) {
resetEnv := unsetEnv(t)
defer resetEnv()
// capture the test server's close method, to call after the test returns
@ -32,18 +32,23 @@ func TestAWSGetAccountId_shouldBeValid_fromEC2Role(t *testing.T) {
ts, iamConn, stsConn := getMockedAwsIamStsApi(iamEndpoints)
defer ts()
id, err := GetAccountId(iamConn, stsConn, ec2rolecreds.ProviderName)
part, id, err := GetAccountInfo(iamConn, stsConn, ec2rolecreds.ProviderName)
if err != nil {
t.Fatalf("Getting account ID from EC2 metadata API failed: %s", err)
}
expectedPart := "aws"
if part != expectedPart {
t.Fatalf("Expected partition: %s, given: %s", expectedPart, part)
}
expectedAccountId := "123456789013"
if id != expectedAccountId {
t.Fatalf("Expected account ID: %s, given: %s", expectedAccountId, id)
}
}
func TestAWSGetAccountId_shouldBeValid_EC2RoleHasPriority(t *testing.T) {
func TestAWSGetAccountInfo_shouldBeValid_EC2RoleHasPriority(t *testing.T) {
resetEnv := unsetEnv(t)
defer resetEnv()
// capture the test server's close method, to call after the test returns
@ -59,18 +64,23 @@ func TestAWSGetAccountId_shouldBeValid_EC2RoleHasPriority(t *testing.T) {
ts, iamConn, stsConn := getMockedAwsIamStsApi(iamEndpoints)
defer ts()
id, err := GetAccountId(iamConn, stsConn, ec2rolecreds.ProviderName)
part, id, err := GetAccountInfo(iamConn, stsConn, ec2rolecreds.ProviderName)
if err != nil {
t.Fatalf("Getting account ID from EC2 metadata API failed: %s", err)
}
expectedPart := "aws"
if part != expectedPart {
t.Fatalf("Expected partition: %s, given: %s", expectedPart, part)
}
expectedAccountId := "123456789013"
if id != expectedAccountId {
t.Fatalf("Expected account ID: %s, given: %s", expectedAccountId, id)
}
}
func TestAWSGetAccountId_shouldBeValid_fromIamUser(t *testing.T) {
func TestAWSGetAccountInfo_shouldBeValid_fromIamUser(t *testing.T) {
iamEndpoints := []*iamEndpoint{
{
Request: &iamRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
@ -81,18 +91,23 @@ func TestAWSGetAccountId_shouldBeValid_fromIamUser(t *testing.T) {
ts, iamConn, stsConn := getMockedAwsIamStsApi(iamEndpoints)
defer ts()
id, err := GetAccountId(iamConn, stsConn, "")
part, id, err := GetAccountInfo(iamConn, stsConn, "")
if err != nil {
t.Fatalf("Getting account ID via GetUser failed: %s", err)
}
expectedPart := "aws"
if part != expectedPart {
t.Fatalf("Expected partition: %s, given: %s", expectedPart, part)
}
expectedAccountId := "123456789012"
if id != expectedAccountId {
t.Fatalf("Expected account ID: %s, given: %s", expectedAccountId, id)
}
}
func TestAWSGetAccountId_shouldBeValid_fromGetCallerIdentity(t *testing.T) {
func TestAWSGetAccountInfo_shouldBeValid_fromGetCallerIdentity(t *testing.T) {
iamEndpoints := []*iamEndpoint{
{
Request: &iamRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
@ -106,18 +121,23 @@ func TestAWSGetAccountId_shouldBeValid_fromGetCallerIdentity(t *testing.T) {
ts, iamConn, stsConn := getMockedAwsIamStsApi(iamEndpoints)
defer ts()
id, err := GetAccountId(iamConn, stsConn, "")
part, id, err := GetAccountInfo(iamConn, stsConn, "")
if err != nil {
t.Fatalf("Getting account ID via GetUser failed: %s", err)
}
expectedPart := "aws"
if part != expectedPart {
t.Fatalf("Expected partition: %s, given: %s", expectedPart, part)
}
expectedAccountId := "123456789012"
if id != expectedAccountId {
t.Fatalf("Expected account ID: %s, given: %s", expectedAccountId, id)
}
}
func TestAWSGetAccountId_shouldBeValid_fromIamListRoles(t *testing.T) {
func TestAWSGetAccountInfo_shouldBeValid_fromIamListRoles(t *testing.T) {
iamEndpoints := []*iamEndpoint{
{
Request: &iamRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
@ -135,18 +155,23 @@ func TestAWSGetAccountId_shouldBeValid_fromIamListRoles(t *testing.T) {
ts, iamConn, stsConn := getMockedAwsIamStsApi(iamEndpoints)
defer ts()
id, err := GetAccountId(iamConn, stsConn, "")
part, id, err := GetAccountInfo(iamConn, stsConn, "")
if err != nil {
t.Fatalf("Getting account ID via ListRoles failed: %s", err)
}
expectedPart := "aws"
if part != expectedPart {
t.Fatalf("Expected partition: %s, given: %s", expectedPart, part)
}
expectedAccountId := "123456789012"
if id != expectedAccountId {
t.Fatalf("Expected account ID: %s, given: %s", expectedAccountId, id)
}
}
func TestAWSGetAccountId_shouldBeValid_federatedRole(t *testing.T) {
func TestAWSGetAccountInfo_shouldBeValid_federatedRole(t *testing.T) {
iamEndpoints := []*iamEndpoint{
{
Request: &iamRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
@ -160,18 +185,23 @@ func TestAWSGetAccountId_shouldBeValid_federatedRole(t *testing.T) {
ts, iamConn, stsConn := getMockedAwsIamStsApi(iamEndpoints)
defer ts()
id, err := GetAccountId(iamConn, stsConn, "")
part, id, err := GetAccountInfo(iamConn, stsConn, "")
if err != nil {
t.Fatalf("Getting account ID via ListRoles failed: %s", err)
}
expectedPart := "aws"
if part != expectedPart {
t.Fatalf("Expected partition: %s, given: %s", expectedPart, part)
}
expectedAccountId := "123456789012"
if id != expectedAccountId {
t.Fatalf("Expected account ID: %s, given: %s", expectedAccountId, id)
}
}
func TestAWSGetAccountId_shouldError_unauthorizedFromIam(t *testing.T) {
func TestAWSGetAccountInfo_shouldError_unauthorizedFromIam(t *testing.T) {
iamEndpoints := []*iamEndpoint{
{
Request: &iamRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
@ -185,29 +215,37 @@ func TestAWSGetAccountId_shouldError_unauthorizedFromIam(t *testing.T) {
ts, iamConn, stsConn := getMockedAwsIamStsApi(iamEndpoints)
defer ts()
id, err := GetAccountId(iamConn, stsConn, "")
part, id, err := GetAccountInfo(iamConn, stsConn, "")
if err == nil {
t.Fatal("Expected error when getting account ID")
}
if part != "" {
t.Fatalf("Expected no partition, given: %s", part)
}
if id != "" {
t.Fatalf("Expected no account ID, given: %s", id)
}
}
func TestAWSParseAccountIdFromArn(t *testing.T) {
func TestAWSParseAccountInfoFromArn(t *testing.T) {
validArn := "arn:aws:iam::101636750127:instance-profile/aws-elasticbeanstalk-ec2-role"
expectedPart := "aws"
expectedId := "101636750127"
id, err := parseAccountIdFromArn(validArn)
part, id, err := parseAccountInfoFromArn(validArn)
if err != nil {
t.Fatalf("Expected no error when parsing valid ARN: %s", err)
}
if part != expectedPart {
t.Fatalf("Parsed part doesn't match with expected (%q != %q)", part, expectedPart)
}
if id != expectedId {
t.Fatalf("Parsed id doesn't match with expected (%q != %q)", id, expectedId)
}
invalidArn := "blablah"
id, err = parseAccountIdFromArn(invalidArn)
part, id, err = parseAccountInfoFromArn(invalidArn)
if err == nil {
t.Fatalf("Expected error when parsing invalid ARN (%q)", invalidArn)
}


@ -6,7 +6,9 @@ import (
"fmt"
"log"
"net/http"
"os"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
@ -54,6 +56,7 @@ import (
"github.com/aws/aws-sdk-go/service/ssm"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/aws/aws-sdk-go/service/waf"
"github.com/davecgh/go-spew/spew"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/terraform/helper/logging"
@ -118,6 +121,7 @@ type AWSClient struct {
stsconn *sts.STS
redshiftconn *redshift.Redshift
r53conn *route53.Route53
partition string
accountid string
region string
rdsconn *rds.RDS
@ -199,6 +203,10 @@ func (c *Config) Client() (interface{}, error) {
}
sess.Handlers.Build.PushFrontNamed(addTerraformVersionToUserAgent)
if extraDebug := os.Getenv("TERRAFORM_AWS_AUTHFAILURE_DEBUG"); extraDebug != "" {
sess.Handlers.UnmarshalError.PushFrontNamed(debugAuthFailure)
}
// Some services exist only in us-east-1, e.g. because they manage
// resources that can span across multiple regions, or because
// signature format v4 requires region to be us-east-1 for global
@ -226,8 +234,9 @@ func (c *Config) Client() (interface{}, error) {
}
if !c.SkipRequestingAccountId {
accountId, err := GetAccountId(client.iamconn, client.stsconn, cp.ProviderName)
partition, accountId, err := GetAccountInfo(client.iamconn, client.stsconn, cp.ProviderName)
if err == nil {
client.partition = partition
client.accountid = accountId
}
}
@ -284,7 +293,7 @@ func (c *Config) Client() (interface{}, error) {
// ValidateRegion returns an error if the configured region is not a
// valid aws region and nil otherwise.
func (c *Config) ValidateRegion() error {
var regions = [13]string{
var regions = []string{
"ap-northeast-1",
"ap-northeast-2",
"ap-south-1",
@ -295,6 +304,7 @@ func (c *Config) ValidateRegion() error {
"eu-west-1",
"sa-east-1",
"us-east-1",
"us-east-2",
"us-gov-west-1",
"us-west-1",
"us-west-2",
@ -351,6 +361,17 @@ var addTerraformVersionToUserAgent = request.NamedHandler{
"terraform", terraform.VersionString()),
}
var debugAuthFailure = request.NamedHandler{
Name: "terraform.AuthFailureAdditionalDebugHandler",
Fn: func(req *request.Request) {
if isAWSErr(req.Error, "AuthFailure", "AWS was not able to validate the provided access credentials") {
log.Printf("[INFO] Additional AuthFailure Debugging Context")
log.Printf("[INFO] Current system UTC time: %s", time.Now().UTC())
log.Printf("[INFO] Request object: %s", spew.Sdump(req))
}
},
}
type awsLogger struct{}
func (l awsLogger) Log(args ...interface{}) {


@ -19,24 +19,24 @@ func dataSourceAwsAmi() *schema.Resource {
Read: dataSourceAwsAmiRead,
Schema: map[string]*schema.Schema{
"executable_users": &schema.Schema{
"executable_users": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"filter": &schema.Schema{
"filter": {
Type: schema.TypeSet,
Optional: true,
ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
},
"values": &schema.Schema{
"values": {
Type: schema.TypeList,
Required: true,
Elem: &schema.Schema{Type: schema.TypeString},
@ -44,158 +44,159 @@ func dataSourceAwsAmi() *schema.Resource {
},
},
},
"name_regex": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
"name_regex": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validateNameRegex,
},
"most_recent": &schema.Schema{
"most_recent": {
Type: schema.TypeBool,
Optional: true,
Default: false,
ForceNew: true,
},
"owners": &schema.Schema{
"owners": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
// Computed values.
"architecture": &schema.Schema{
"architecture": {
Type: schema.TypeString,
Computed: true,
},
"creation_date": &schema.Schema{
"creation_date": {
Type: schema.TypeString,
Computed: true,
},
"description": &schema.Schema{
"description": {
Type: schema.TypeString,
Computed: true,
},
"hypervisor": &schema.Schema{
"hypervisor": {
Type: schema.TypeString,
Computed: true,
},
"image_id": &schema.Schema{
"image_id": {
Type: schema.TypeString,
Computed: true,
},
"image_location": &schema.Schema{
"image_location": {
Type: schema.TypeString,
Computed: true,
},
"image_owner_alias": &schema.Schema{
"image_owner_alias": {
Type: schema.TypeString,
Computed: true,
},
"image_type": &schema.Schema{
"image_type": {
Type: schema.TypeString,
Computed: true,
},
"kernel_id": &schema.Schema{
"kernel_id": {
Type: schema.TypeString,
Computed: true,
},
"name": &schema.Schema{
"name": {
Type: schema.TypeString,
Computed: true,
},
"owner_id": &schema.Schema{
"owner_id": {
Type: schema.TypeString,
Computed: true,
},
"platform": &schema.Schema{
"platform": {
Type: schema.TypeString,
Computed: true,
},
"public": &schema.Schema{
"public": {
Type: schema.TypeBool,
Computed: true,
},
"ramdisk_id": &schema.Schema{
"ramdisk_id": {
Type: schema.TypeString,
Computed: true,
},
"root_device_name": &schema.Schema{
"root_device_name": {
Type: schema.TypeString,
Computed: true,
},
"root_device_type": &schema.Schema{
"root_device_type": {
Type: schema.TypeString,
Computed: true,
},
"sriov_net_support": &schema.Schema{
"sriov_net_support": {
Type: schema.TypeString,
Computed: true,
},
"state": &schema.Schema{
"state": {
Type: schema.TypeString,
Computed: true,
},
"virtualization_type": &schema.Schema{
"virtualization_type": {
Type: schema.TypeString,
Computed: true,
},
// Complex computed values
"block_device_mappings": &schema.Schema{
"block_device_mappings": {
Type: schema.TypeSet,
Computed: true,
Set: amiBlockDeviceMappingHash,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"device_name": &schema.Schema{
"device_name": {
Type: schema.TypeString,
Computed: true,
},
"no_device": &schema.Schema{
"no_device": {
Type: schema.TypeString,
Computed: true,
},
"virtual_name": &schema.Schema{
"virtual_name": {
Type: schema.TypeString,
Computed: true,
},
"ebs": &schema.Schema{
"ebs": {
Type: schema.TypeMap,
Computed: true,
},
},
},
},
"product_codes": &schema.Schema{
"product_codes": {
Type: schema.TypeSet,
Computed: true,
Set: amiProductCodesHash,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"product_code_id": &schema.Schema{
"product_code_id": {
Type: schema.TypeString,
Computed: true,
},
"product_code_type": &schema.Schema{
"product_code_type": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"state_reason": &schema.Schema{
"state_reason": {
Type: schema.TypeMap,
Computed: true,
},
"tags": &schema.Schema{
"tags": {
Type: schema.TypeSet,
Computed: true,
Set: amiTagsHash,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"key": &schema.Schema{
"key": {
Type: schema.TypeString,
Computed: true,
},
"value": &schema.Schema{
"value": {
Type: schema.TypeString,
Computed: true,
},
@ -270,11 +271,12 @@ func dataSourceAwsAmiRead(d *schema.ResourceData, meta interface{}) error {
return fmt.Errorf("Your query returned more than one result. Please try a more " +
"specific search criteria, or set `most_recent` attribute to true.")
}
} else {
// Query returned single result.
image = filteredImages[0]
}
image = filteredImages[0]
log.Printf("[DEBUG] aws_ami - Single AMI found: %s", *image.ImageId)
return amiDescriptionAttributes(d, image)
}
@ -496,3 +498,14 @@ func amiTagsHash(v interface{}) int {
buf.WriteString(fmt.Sprintf("%s-", m["value"].(string)))
return hashcode.String(buf.String())
}
func validateNameRegex(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if _, err := regexp.Compile(value); err != nil {
errors = append(errors, fmt.Errorf(
"%q contains an invalid regular expression: %s",
k, err))
}
return
}

View File

@ -155,6 +155,57 @@ func TestAccAWSAmiDataSource_localNameFilter(t *testing.T) {
})
}
func TestResourceValidateNameRegex(t *testing.T) {
type testCases struct {
Value string
ErrCount int
}
invalidCases := []testCases{
{
Value: `\`,
ErrCount: 1,
},
{
Value: `**`,
ErrCount: 1,
},
{
Value: `(.+`,
ErrCount: 1,
},
}
for _, tc := range invalidCases {
_, errors := validateNameRegex(tc.Value, "name_regex")
if len(errors) != tc.ErrCount {
t.Fatalf("Expected %q to trigger a validation error.", tc.Value)
}
}
validCases := []testCases{
{
Value: `\/`,
ErrCount: 0,
},
{
Value: `.*`,
ErrCount: 0,
},
{
Value: `\b(?:\d{1,3}\.){3}\d{1,3}\b`,
ErrCount: 0,
},
}
for _, tc := range validCases {
_, errors := validateNameRegex(tc.Value, "name_regex")
if len(errors) != tc.ErrCount {
t.Fatalf("Expected %q not to trigger a validation error.", tc.Value)
}
}
}
func testAccCheckAwsAmiDataSourceDestroy(s *terraform.State) error {
return nil
}

View File

@ -0,0 +1,89 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsAvailabilityZone() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsAvailabilityZoneRead,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"region": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"name_suffix": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"state": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
},
}
}
func dataSourceAwsAvailabilityZoneRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
req := &ec2.DescribeAvailabilityZonesInput{}
if name := d.Get("name"); name != "" {
req.ZoneNames = []*string{aws.String(name.(string))}
}
req.Filters = buildEC2AttributeFilterList(
map[string]string{
"state": d.Get("state").(string),
},
)
if len(req.Filters) == 0 {
// Don't send an empty filters list; the EC2 API won't accept it.
req.Filters = nil
}
log.Printf("[DEBUG] DescribeAvailabilityZones %s\n", req)
resp, err := conn.DescribeAvailabilityZones(req)
if err != nil {
return err
}
if resp == nil || len(resp.AvailabilityZones) == 0 {
return fmt.Errorf("no matching AZ found")
}
if len(resp.AvailabilityZones) > 1 {
return fmt.Errorf("multiple AZs matched; use additional constraints to reduce matches to a single AZ")
}
az := resp.AvailabilityZones[0]
// As a convenience when working with AZs generically, we expose
// the AZ suffix alone, without the region name.
// This can be used e.g. to create lookup tables by AZ letter that
// work regardless of region.
nameSuffix := (*az.ZoneName)[len(*az.RegionName):]
d.SetId(*az.ZoneName)
d.Set("id", az.ZoneName)
d.Set("name", az.ZoneName)
d.Set("name_suffix", nameSuffix)
d.Set("region", az.RegionName)
d.Set("state", az.State)
return nil
}
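The name_suffix attribute is meant for the lookup-table pattern described in the comment above: keying a map by AZ letter so the same configuration works in any region. A minimal sketch of that pattern, with illustrative variable and resource names that are not part of this change:
variable "instance_type_by_az_letter" {
  default = {
    a = "t2.micro"
    b = "t2.small"
  }
}
data "aws_availability_zone" "target" {
  name = "us-west-2a"
}
resource "aws_instance" "example" {
  # The AMI ID is only a placeholder for the sketch.
  ami               = "ami-21f78e11"
  availability_zone = "${data.aws_availability_zone.target.name}"
  instance_type     = "${lookup(var.instance_type_by_az_letter, data.aws_availability_zone.target.name_suffix)}"
}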

View File

@ -0,0 +1,57 @@
package aws
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccDataSourceAwsAvailabilityZone(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDataSourceAwsAvailabilityZoneConfig,
Check: resource.ComposeTestCheckFunc(
testAccDataSourceAwsAvailabilityZoneCheck("data.aws_availability_zone.by_name"),
),
},
},
})
}
func testAccDataSourceAwsAvailabilityZoneCheck(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[name]
if !ok {
return fmt.Errorf("root module has no resource called %s", name)
}
attr := rs.Primary.Attributes
if attr["name"] != "us-west-2a" {
return fmt.Errorf("bad name %s", attr["name"])
}
if attr["name_suffix"] != "a" {
return fmt.Errorf("bad name_suffix %s", attr["name_suffix"])
}
if attr["region"] != "us-west-2" {
return fmt.Errorf("bad region %s", attr["region"])
}
return nil
}
}
const testAccDataSourceAwsAvailabilityZoneConfig = `
provider "aws" {
region = "us-west-2"
}
data "aws_availability_zone" "by_name" {
name = "us-west-2a"
}
`

View File

@ -0,0 +1,29 @@
package aws
import (
"github.com/hashicorp/terraform/helper/schema"
)
// See http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/billing-getting-started.html#step-2
var billingAccountId = "386209384616"
func dataSourceAwsBillingServiceAccount() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsBillingServiceAccountRead,
Schema: map[string]*schema.Schema{
"arn": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
},
}
}
func dataSourceAwsBillingServiceAccountRead(d *schema.ResourceData, meta interface{}) error {
d.SetId(billingAccountId)
d.Set("arn", "arn:aws:iam::"+billingAccountId+":root")
return nil
}
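The arn exposed here is normally granted access on the S3 bucket that receives detailed billing reports. A rough sketch of that wiring, assuming a bucket named my-billing-reports; the policy statements are illustrative and not taken from this change:
data "aws_billing_service_account" "main" {}
resource "aws_s3_bucket" "billing_reports" {
  bucket = "my-billing-reports"
  policy = <<POLICY
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": { "AWS": "${data.aws_billing_service_account.main.arn}" },
      "Action": ["s3:GetBucketAcl", "s3:GetBucketPolicy"],
      "Resource": "arn:aws:s3:::my-billing-reports"
    },
    {
      "Effect": "Allow",
      "Principal": { "AWS": "${data.aws_billing_service_account.main.arn}" },
      "Action": "s3:PutObject",
      "Resource": "arn:aws:s3:::my-billing-reports/*"
    }
  ]
}
POLICY
}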

View File

@ -0,0 +1,27 @@
package aws
import (
"testing"
"github.com/hashicorp/terraform/helper/resource"
)
func TestAccAWSBillingServiceAccount_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccCheckAwsBillingServiceAccountConfig,
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("data.aws_billing_service_account.main", "id", "386209384616"),
resource.TestCheckResourceAttr("data.aws_billing_service_account.main", "arn", "arn:aws:iam::386209384616:root"),
),
},
},
})
}
const testAccCheckAwsBillingServiceAccountConfig = `
data "aws_billing_service_account" "main" { }
`

View File

@ -18,6 +18,7 @@ var elbAccountIdPerRegionMap = map[string]string{
"eu-west-1": "156460612806",
"sa-east-1": "507241528517",
"us-east-1": "127311923021",
"us-east-2": "033677994240",
"us-gov-west": "048591011584",
"us-west-1": "027434742980",
"us-west-2": "797873946194",

View File

@ -0,0 +1,70 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsPrefixList() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsPrefixListRead,
Schema: map[string]*schema.Schema{
"prefix_list_id": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
// Computed values.
"id": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"name": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"cidr_blocks": &schema.Schema{
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
}
}
func dataSourceAwsPrefixListRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
req := &ec2.DescribePrefixListsInput{}
if prefixListID := d.Get("prefix_list_id"); prefixListID != "" {
req.PrefixListIds = []*string{aws.String(prefixListID.(string))}
}
log.Printf("[DEBUG] DescribePrefixLists %s\n", req)
resp, err := conn.DescribePrefixLists(req)
if err != nil {
return err
}
if resp == nil || len(resp.PrefixLists) == 0 {
return fmt.Errorf("no matching prefix list found; the prefix list ID may be invalid or not exist in the current region")
}
pl := resp.PrefixLists[0]
d.SetId(*pl.PrefixListId)
d.Set("id", pl.PrefixListId)
d.Set("name", pl.PrefixListName)
cidrs := make([]string, len(pl.Cidrs))
for i, v := range pl.Cidrs {
cidrs[i] = *v
}
d.Set("cidr_blocks", cidrs)
return nil
}
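A typical consumer of this data source is a security group that only allows outbound traffic to the matching service, such as S3 reached through a VPC endpoint. A minimal sketch, assuming the egress block's prefix_list_ids argument and illustrative resource names:
data "aws_prefix_list" "s3" {
  prefix_list_id = "pl-68a54001"
}
resource "aws_vpc" "main" {
  cidr_block = "10.0.0.0/16"
}
resource "aws_security_group" "s3_only_egress" {
  name   = "s3-only-egress"
  vpc_id = "${aws_vpc.main.id}"
  egress {
    from_port       = 443
    to_port         = 443
    protocol        = "tcp"
    prefix_list_ids = ["${data.aws_prefix_list.s3.id}"]
  }
}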

View File

@ -0,0 +1,67 @@
package aws
import (
"fmt"
"strconv"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccDataSourceAwsPrefixList(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDataSourceAwsPrefixListConfig,
Check: resource.ComposeTestCheckFunc(
testAccDataSourceAwsPrefixListCheck("data.aws_prefix_list.s3"),
),
},
},
})
}
func testAccDataSourceAwsPrefixListCheck(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[name]
if !ok {
return fmt.Errorf("root module has no resource called %s", name)
}
attr := rs.Primary.Attributes
if attr["name"] != "com.amazonaws.us-west-2.s3" {
return fmt.Errorf("bad name %s", attr["name"])
}
if attr["id"] != "pl-68a54001" {
return fmt.Errorf("bad id %s", attr["id"])
}
var (
cidrBlockSize int
err error
)
if cidrBlockSize, err = strconv.Atoi(attr["cidr_blocks.#"]); err != nil {
return err
}
if cidrBlockSize < 1 {
return fmt.Errorf("cidr_blocks seem suspiciously low: %d", cidrBlockSize)
}
return nil
}
}
const testAccDataSourceAwsPrefixListConfig = `
provider "aws" {
region = "us-west-2"
}
data "aws_prefix_list" "s3" {
prefix_list_id = "pl-68a54001"
}
`

View File

@ -9,6 +9,7 @@ import (
// See http://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html#db-auditing-enable-logging
var redshiftServiceAccountPerRegionMap = map[string]string{
"us-east-1": "193672423079",
"us-east-2": "391106570357",
"us-west-1": "262260360010",
"us-west-2": "902366379725",
"ap-south-1": "865932855811",

View File

@ -0,0 +1,84 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsRegion() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsRegionRead,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"current": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"endpoint": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
},
}
}
func dataSourceAwsRegionRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
currentRegion := meta.(*AWSClient).region
req := &ec2.DescribeRegionsInput{}
req.RegionNames = make([]*string, 0, 2)
if name := d.Get("name").(string); name != "" {
req.RegionNames = append(req.RegionNames, aws.String(name))
}
if d.Get("current").(bool) {
req.RegionNames = append(req.RegionNames, aws.String(currentRegion))
}
req.Filters = buildEC2AttributeFilterList(
map[string]string{
"endpoint": d.Get("endpoint").(string),
},
)
if len(req.Filters) == 0 {
// Don't send an empty filters list; the EC2 API won't accept it.
req.Filters = nil
}
log.Printf("[DEBUG] DescribeRegions %s\n", req)
resp, err := conn.DescribeRegions(req)
if err != nil {
return err
}
if resp == nil || len(resp.Regions) == 0 {
return fmt.Errorf("no matching regions found")
}
if len(resp.Regions) > 1 {
return fmt.Errorf("multiple regions matched; use additional constraints to reduce matches to a single region")
}
region := resp.Regions[0]
d.SetId(*region.RegionName)
d.Set("id", region.RegionName)
d.Set("name", region.RegionName)
d.Set("endpoint", region.Endpoint)
d.Set("current", *region.RegionName == currentRegion)
return nil
}
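Beyond the acceptance test in the next file, a common pattern is to resolve the provider's current region once and feed its attributes into other values; a short sketch:
data "aws_region" "current" {
  current = true
}
output "deployment_region" {
  value = "${data.aws_region.current.name}"
}
output "ec2_endpoint" {
  value = "${data.aws_region.current.endpoint}"
}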

View File

@ -0,0 +1,64 @@
package aws
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccDataSourceAwsRegion(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDataSourceAwsRegionConfig,
Check: resource.ComposeTestCheckFunc(
testAccDataSourceAwsRegionCheck("data.aws_region.by_name_current", "us-west-2", "true"),
testAccDataSourceAwsRegionCheck("data.aws_region.by_name_other", "us-west-1", "false"),
testAccDataSourceAwsRegionCheck("data.aws_region.by_current", "us-west-2", "true"),
),
},
},
})
}
func testAccDataSourceAwsRegionCheck(name, region, current string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[name]
if !ok {
return fmt.Errorf("root module has no resource called %s", name)
}
attr := rs.Primary.Attributes
if attr["name"] != region {
return fmt.Errorf("bad name %s", attr["name"])
}
if attr["current"] != current {
return fmt.Errorf("bad current %s; want %s", attr["current"], current)
}
return nil
}
}
const testAccDataSourceAwsRegionConfig = `
provider "aws" {
region = "us-west-2"
}
data "aws_region" "by_name_current" {
name = "us-west-2"
}
data "aws_region" "by_name_other" {
name = "us-west-1"
}
data "aws_region" "by_current" {
current = true
}
`

View File

@ -0,0 +1,86 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsSecurityGroup() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsSecurityGroupRead,
Schema: map[string]*schema.Schema{
"vpc_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"filter": ec2CustomFiltersSchema(),
"id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"tags": tagsSchemaComputed(),
},
}
}
func dataSourceAwsSecurityGroupRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
req := &ec2.DescribeSecurityGroupsInput{}
if id, idExists := d.GetOk("id"); idExists {
req.GroupIds = []*string{aws.String(id.(string))}
}
req.Filters = buildEC2AttributeFilterList(
map[string]string{
"group-name": d.Get("name").(string),
"vpc-id": d.Get("vpc_id").(string),
},
)
req.Filters = append(req.Filters, buildEC2TagFilterList(
tagsFromMap(d.Get("tags").(map[string]interface{})),
)...)
req.Filters = append(req.Filters, buildEC2CustomFilterList(
d.Get("filter").(*schema.Set),
)...)
if len(req.Filters) == 0 {
// Don't send an empty filters list; the EC2 API won't accept it.
req.Filters = nil
}
log.Printf("[DEBUG] Describe Security Groups %v\n", req)
resp, err := conn.DescribeSecurityGroups(req)
if err != nil {
return err
}
if resp == nil || len(resp.SecurityGroups) == 0 {
return fmt.Errorf("no matching SecurityGroup found")
}
if len(resp.SecurityGroups) > 1 {
return fmt.Errorf("multiple Security Groups matched; use additional constraints to reduce matches to a single Security Group")
}
sg := resp.SecurityGroups[0]
d.SetId(*sg.GroupId)
d.Set("id", sg.VpcId)
d.Set("name", sg.GroupName)
d.Set("description", sg.Description)
d.Set("vpc_id", sg.VpcId)
d.Set("tags", tagsToMap(sg.Tags))
return nil
}

View File

@ -0,0 +1,109 @@
package aws
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccDataSourceAwsSecurityGroup(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDataSourceAwsSecurityGroupConfig,
Check: resource.ComposeTestCheckFunc(
testAccDataSourceAwsSecurityGroupCheck("data.aws_security_group.by_id"),
testAccDataSourceAwsSecurityGroupCheck("data.aws_security_group.by_tag"),
testAccDataSourceAwsSecurityGroupCheck("data.aws_security_group.by_filter"),
testAccDataSourceAwsSecurityGroupCheck("data.aws_security_group.by_name"),
),
},
},
})
}
func testAccDataSourceAwsSecurityGroupCheck(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[name]
if !ok {
return fmt.Errorf("root module has no resource called %s", name)
}
SGRs, ok := s.RootModule().Resources["aws_security_group.test"]
if !ok {
return fmt.Errorf("can't find aws_security_group.test in state")
}
vpcRs, ok := s.RootModule().Resources["aws_vpc.test"]
if !ok {
return fmt.Errorf("can't find aws_vpc.test in state")
}
attr := rs.Primary.Attributes
if attr["id"] != SGRs.Primary.Attributes["id"] {
return fmt.Errorf(
"id is %s; want %s",
attr["id"],
SGRs.Primary.Attributes["id"],
)
}
if attr["vpc_id"] != vpcRs.Primary.Attributes["id"] {
return fmt.Errorf(
"vpc_id is %s; want %s",
attr["vpc_id"],
vpcRs.Primary.Attributes["id"],
)
}
if attr["tags.Name"] != "terraform-testacc-security-group-data-source" {
return fmt.Errorf("bad Name tag %s", attr["tags.Name"])
}
return nil
}
}
const testAccDataSourceAwsSecurityGroupConfig = `
provider "aws" {
region = "eu-west-1"
}
resource "aws_vpc" "test" {
cidr_block = "172.16.0.0/16"
tags {
Name = "terraform-testacc-subnet-data-source"
}
}
resource "aws_security_group" "test" {
vpc_id = "${aws_vpc.test.id}"
name = "security-groupe-name-test"
tags {
Name = "terraform-testacc-security-group-data-source"
}
}
data "aws_security_group" "by_id" {
id = "${aws_security_group.test.id}"
}
data "aws_security_group" "by_name" {
name = "${aws_security_group.test.name}"
}
data "aws_security_group" "by_tag" {
tags {
Name = "${aws_security_group.test.tags["Name"]}"
}
}
data "aws_security_group" "by_filter" {
filter {
name = "group-name"
values = ["${aws_security_group.test.name}"]
}
}
`

View File

@ -0,0 +1,123 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsSubnet() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsSubnetRead,
Schema: map[string]*schema.Schema{
"availability_zone": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"cidr_block": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"default_for_az": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"filter": ec2CustomFiltersSchema(),
"id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"state": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"tags": tagsSchemaComputed(),
"vpc_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
},
}
}
func dataSourceAwsSubnetRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
req := &ec2.DescribeSubnetsInput{}
if id := d.Get("id"); id != "" {
req.SubnetIds = []*string{aws.String(id.(string))}
}
// We specify default_for_az as boolean, but EC2 filters want
// it to be serialized as a string. Note that setting it to
// "false" here does not actually filter by it *not* being
// the default, because Terraform can't distinguish between
// "false" and "not set".
defaultForAzStr := ""
if d.Get("default_for_az").(bool) {
defaultForAzStr = "true"
}
req.Filters = buildEC2AttributeFilterList(
map[string]string{
"availabilityZone": d.Get("availability_zone").(string),
"cidrBlock": d.Get("cidr_block").(string),
"defaultForAz": defaultForAzStr,
"state": d.Get("state").(string),
"vpc-id": d.Get("vpc_id").(string),
},
)
req.Filters = append(req.Filters, buildEC2TagFilterList(
tagsFromMap(d.Get("tags").(map[string]interface{})),
)...)
req.Filters = append(req.Filters, buildEC2CustomFilterList(
d.Get("filter").(*schema.Set),
)...)
if len(req.Filters) == 0 {
// Don't send an empty filters list; the EC2 API won't accept it.
req.Filters = nil
}
log.Printf("[DEBUG] DescribeSubnets %s\n", req)
resp, err := conn.DescribeSubnets(req)
if err != nil {
return err
}
if resp == nil || len(resp.Subnets) == 0 {
return fmt.Errorf("no matching subnet found")
}
if len(resp.Subnets) > 1 {
return fmt.Errorf("multiple subnets matched; use additional constraints to reduce matches to a single subnet")
}
subnet := resp.Subnets[0]
d.SetId(*subnet.SubnetId)
d.Set("id", subnet.SubnetId)
d.Set("vpc_id", subnet.VpcId)
d.Set("availability_zone", subnet.AvailabilityZone)
d.Set("cidr_block", subnet.CidrBlock)
d.Set("default_for_az", subnet.DefaultForAz)
d.Set("state", subnet.State)
d.Set("tags", tagsToMap(subnet.Tags))
return nil
}

View File

@ -0,0 +1,125 @@
package aws
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccDataSourceAwsSubnet(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDataSourceAwsSubnetConfig,
Check: resource.ComposeTestCheckFunc(
testAccDataSourceAwsSubnetCheck("data.aws_subnet.by_id"),
testAccDataSourceAwsSubnetCheck("data.aws_subnet.by_cidr"),
testAccDataSourceAwsSubnetCheck("data.aws_subnet.by_tag"),
testAccDataSourceAwsSubnetCheck("data.aws_subnet.by_vpc"),
testAccDataSourceAwsSubnetCheck("data.aws_subnet.by_filter"),
),
},
},
})
}
func testAccDataSourceAwsSubnetCheck(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[name]
if !ok {
return fmt.Errorf("root module has no resource called %s", name)
}
vpcRs, ok := s.RootModule().Resources["aws_vpc.test"]
if !ok {
return fmt.Errorf("can't find aws_vpc.test in state")
}
subnetRs, ok := s.RootModule().Resources["aws_subnet.test"]
if !ok {
return fmt.Errorf("can't find aws_subnet.test in state")
}
attr := rs.Primary.Attributes
if attr["id"] != subnetRs.Primary.Attributes["id"] {
return fmt.Errorf(
"id is %s; want %s",
attr["id"],
subnetRs.Primary.Attributes["id"],
)
}
if attr["vpc_id"] != vpcRs.Primary.Attributes["id"] {
return fmt.Errorf(
"vpc_id is %s; want %s",
attr["vpc_id"],
vpcRs.Primary.Attributes["id"],
)
}
if attr["cidr_block"] != "172.16.123.0/24" {
return fmt.Errorf("bad cidr_block %s", attr["cidr_block"])
}
if attr["availability_zone"] != "us-west-2a" {
return fmt.Errorf("bad availability_zone %s", attr["availability_zone"])
}
if attr["tags.Name"] != "terraform-testacc-subnet-data-source" {
return fmt.Errorf("bad Name tag %s", attr["tags.Name"])
}
return nil
}
}
const testAccDataSourceAwsSubnetConfig = `
provider "aws" {
region = "us-west-2"
}
resource "aws_vpc" "test" {
cidr_block = "172.16.0.0/16"
tags {
Name = "terraform-testacc-subnet-data-source"
}
}
resource "aws_subnet" "test" {
vpc_id = "${aws_vpc.test.id}"
cidr_block = "172.16.123.0/24"
availability_zone = "us-west-2a"
tags {
Name = "terraform-testacc-subnet-data-source"
}
}
data "aws_subnet" "by_id" {
id = "${aws_subnet.test.id}"
}
data "aws_subnet" "by_cidr" {
cidr_block = "${aws_subnet.test.cidr_block}"
}
data "aws_subnet" "by_tag" {
tags {
Name = "${aws_subnet.test.tags["Name"]}"
}
}
data "aws_subnet" "by_vpc" {
vpc_id = "${aws_subnet.test.vpc_id}"
}
data "aws_subnet" "by_filter" {
filter {
name = "vpc-id"
values = ["${aws_subnet.test.vpc_id}"]
}
}
`

View File

@ -0,0 +1,121 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsVpc() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsVpcRead,
Schema: map[string]*schema.Schema{
"cidr_block": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"dhcp_options_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"default": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"filter": ec2CustomFiltersSchema(),
"id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"instance_tenancy": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"state": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"tags": tagsSchemaComputed(),
},
}
}
func dataSourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
req := &ec2.DescribeVpcsInput{}
if id := d.Get("id"); id != "" {
req.VpcIds = []*string{aws.String(id.(string))}
}
// We specify "default" as boolean, but EC2 filters want
// it to be serialized as a string. Note that setting it to
// "false" here does not actually filter by it *not* being
// the default, because Terraform can't distinguish between
// "false" and "not set".
isDefaultStr := ""
if d.Get("default").(bool) {
isDefaultStr = "true"
}
req.Filters = buildEC2AttributeFilterList(
map[string]string{
"cidr": d.Get("cidr_block").(string),
"dhcp-options-id": d.Get("dhcp_options_id").(string),
"isDefault": isDefaultStr,
"state": d.Get("state").(string),
},
)
req.Filters = append(req.Filters, buildEC2TagFilterList(
tagsFromMap(d.Get("tags").(map[string]interface{})),
)...)
req.Filters = append(req.Filters, buildEC2CustomFilterList(
d.Get("filter").(*schema.Set),
)...)
if len(req.Filters) == 0 {
// Don't send an empty filters list; the EC2 API won't accept it.
req.Filters = nil
}
log.Printf("[DEBUG] DescribeVpcs %s\n", req)
resp, err := conn.DescribeVpcs(req)
if err != nil {
return err
}
if resp == nil || len(resp.Vpcs) == 0 {
return fmt.Errorf("no matching VPC found")
}
if len(resp.Vpcs) > 1 {
return fmt.Errorf("multiple VPCs matched; use additional constraints to reduce matches to a single VPC")
}
vpc := resp.Vpcs[0]
d.SetId(*vpc.VpcId)
d.Set("id", vpc.VpcId)
d.Set("cidr_block", vpc.CidrBlock)
d.Set("dhcp_options_id", vpc.DhcpOptionsId)
d.Set("instance_tenancy", vpc.InstanceTenancy)
d.Set("default", vpc.IsDefault)
d.Set("state", vpc.State)
d.Set("tags", tagsToMap(vpc.Tags))
return nil
}

View File

@ -0,0 +1,95 @@
package aws
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccDataSourceAwsVpc(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDataSourceAwsVpcConfig,
Check: resource.ComposeTestCheckFunc(
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_id"),
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_cidr"),
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_tag"),
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_filter"),
),
},
},
})
}
func testAccDataSourceAwsVpcCheck(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[name]
if !ok {
return fmt.Errorf("root module has no resource called %s", name)
}
vpcRs, ok := s.RootModule().Resources["aws_vpc.test"]
if !ok {
return fmt.Errorf("can't find aws_vpc.test in state")
}
attr := rs.Primary.Attributes
if attr["id"] != vpcRs.Primary.Attributes["id"] {
return fmt.Errorf(
"id is %s; want %s",
attr["id"],
vpcRs.Primary.Attributes["id"],
)
}
if attr["cidr_block"] != "172.16.0.0/16" {
return fmt.Errorf("bad cidr_block %s", attr["cidr_block"])
}
if attr["tags.Name"] != "terraform-testacc-vpc-data-source" {
return fmt.Errorf("bad Name tag %s", attr["tags.Name"])
}
return nil
}
}
const testAccDataSourceAwsVpcConfig = `
provider "aws" {
region = "us-west-2"
}
resource "aws_vpc" "test" {
cidr_block = "172.16.0.0/16"
tags {
Name = "terraform-testacc-vpc-data-source"
}
}
data "aws_vpc" "by_id" {
id = "${aws_vpc.test.id}"
}
data "aws_vpc" "by_cidr" {
cidr_block = "${aws_vpc.test.cidr_block}"
}
data "aws_vpc" "by_tag" {
tags {
Name = "${aws_vpc.test.tags["Name"]}"
}
}
data "aws_vpc" "by_filter" {
filter {
name = "cidr"
values = ["${aws_vpc.test.cidr_block}"]
}
}
`

View File

@ -0,0 +1,163 @@
package aws
import (
"fmt"
"sort"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
// buildEC2AttributeFilterList takes a flat map of scalar attributes (most
// likely values extracted from a *schema.ResourceData on an EC2-querying
// data source) and produces a []*ec2.Filter representing an exact match
// for each of the given non-empty attributes.
//
// The keys of the given attributes map are the attribute names expected
// by the EC2 API, which are usually either in camelcase or with dash-separated
// words. We conventionally map these to underscore-separated identifiers
// with the same words when presenting these as data source query attributes
// in Terraform.
//
// It's the caller's responsibility to transform any non-string values into
// the appropriate string serialization required by the AWS API when
// encoding the given filter. Any attributes given with empty string values
// are ignored, assuming that the user wishes to leave that attribute
// unconstrained while filtering.
//
// The purpose of this function is to create values to pass in
// for the "Filters" attribute on most of the "Describe..." API functions in
// the EC2 API, to aid in the implementation of Terraform data sources that
// retrieve data about EC2 objects.
func buildEC2AttributeFilterList(attrs map[string]string) []*ec2.Filter {
var filters []*ec2.Filter
// sort the filters by name to make the output deterministic
var names []string
for filterName := range attrs {
names = append(names, filterName)
}
sort.Strings(names)
for _, filterName := range names {
value := attrs[filterName]
if value == "" {
continue
}
filters = append(filters, &ec2.Filter{
Name: aws.String(filterName),
Values: []*string{aws.String(value)},
})
}
return filters
}
// buildEC2TagFilterList takes a []*ec2.Tag and produces a []*ec2.Filter that
// represents exact matches for all of the tag key/value pairs given in
// the tag set.
//
// The purpose of this function is to create values to pass in for
// the "Filters" attribute on most of the "Describe..." API functions
// in the EC2 API, to implement filtering by tag values e.g. in Terraform
// data sources that retrieve data about EC2 objects.
//
// It is conventional for an EC2 data source to include an attribute called
// "tags" which conforms to the schema returned by the tagsSchema() function.
// The value of this can then be converted to a tags slice using tagsFromMap,
// and the result finally passed in to this function.
//
// In Terraform configuration this would then look like this, to constrain
// results by name:
//
// tags {
// Name = "my-awesome-subnet"
// }
func buildEC2TagFilterList(tags []*ec2.Tag) []*ec2.Filter {
filters := make([]*ec2.Filter, len(tags))
for i, tag := range tags {
filters[i] = &ec2.Filter{
Name: aws.String(fmt.Sprintf("tag:%s", *tag.Key)),
Values: []*string{tag.Value},
}
}
return filters
}
// ec2CustomFiltersSchema returns a *schema.Schema that represents
// a set of custom filtering criteria that a user can specify as input
// to a data source that wraps one of the many "Describe..." API calls
// in the EC2 API.
//
// It is conventional for an attribute of this type to be included
// as a top-level attribute called "filter". This is the "catch all" for
// filter combinations that are not possible to express using scalar
// attributes or tags. In Terraform configuration, the custom filter blocks
// then look like this:
//
// filter {
// name = "availabilityZone"
// values = ["us-west-2a", "us-west-2b"]
// }
func ec2CustomFiltersSchema() *schema.Schema {
return &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"values": &schema.Schema{
Type: schema.TypeSet,
Required: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
},
},
}
}
// buildEC2CustomFilterList takes the set value extracted from a schema
// attribute conforming to the schema returned by ec2CustomFiltersSchema,
// and transforms it into a []*ec2.Filter representing the same filter
// expressions which is ready to pass into the "Filters" attribute on most
// of the "Describe..." functions in the EC2 API.
//
// This function is intended only to be used in conjunction with
// ec2CustomFiltersSchema. See the docs on that function for more details
// on the configuration pattern this is intended to support.
func buildEC2CustomFilterList(filterSet *schema.Set) []*ec2.Filter {
if filterSet == nil {
return []*ec2.Filter{}
}
customFilters := filterSet.List()
filters := make([]*ec2.Filter, len(customFilters))
for filterIdx, customFilterI := range customFilters {
customFilterMapI := customFilterI.(map[string]interface{})
name := customFilterMapI["name"].(string)
valuesI := customFilterMapI["values"].(*schema.Set).List()
values := make([]*string, len(valuesI))
for valueIdx, valueI := range valuesI {
values[valueIdx] = aws.String(valueI.(string))
}
filters[filterIdx] = &ec2.Filter{
Name: &name,
Values: values,
}
}
return filters
}

View File

@ -0,0 +1,158 @@
package aws
import (
"reflect"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func TestBuildEC2AttributeFilterList(t *testing.T) {
type TestCase struct {
Attrs map[string]string
Expected []*ec2.Filter
}
testCases := []TestCase{
{
map[string]string{
"foo": "bar",
"baz": "boo",
},
[]*ec2.Filter{
{
Name: aws.String("baz"),
Values: []*string{aws.String("boo")},
},
{
Name: aws.String("foo"),
Values: []*string{aws.String("bar")},
},
},
},
{
map[string]string{
"foo": "bar",
"baz": "",
},
[]*ec2.Filter{
{
Name: aws.String("foo"),
Values: []*string{aws.String("bar")},
},
},
},
}
for i, testCase := range testCases {
result := buildEC2AttributeFilterList(testCase.Attrs)
if !reflect.DeepEqual(result, testCase.Expected) {
t.Errorf(
"test case %d: got %#v, but want %#v",
i, result, testCase.Expected,
)
}
}
}
func TestBuildEC2TagFilterList(t *testing.T) {
type TestCase struct {
Tags []*ec2.Tag
Expected []*ec2.Filter
}
testCases := []TestCase{
{
[]*ec2.Tag{
{
Key: aws.String("foo"),
Value: aws.String("bar"),
},
{
Key: aws.String("baz"),
Value: aws.String("boo"),
},
},
[]*ec2.Filter{
{
Name: aws.String("tag:foo"),
Values: []*string{aws.String("bar")},
},
{
Name: aws.String("tag:baz"),
Values: []*string{aws.String("boo")},
},
},
},
}
for i, testCase := range testCases {
result := buildEC2TagFilterList(testCase.Tags)
if !reflect.DeepEqual(result, testCase.Expected) {
t.Errorf(
"test case %d: got %#v, but want %#v",
i, result, testCase.Expected,
)
}
}
}
func TestBuildEC2CustomFilterList(t *testing.T) {
// We need to get a set with the appropriate hash function,
// so we'll use the schema to help us produce what would
// be produced in the normal case.
filtersSchema := ec2CustomFiltersSchema()
// The zero value of this schema will be an interface{}
// referring to a new, empty *schema.Set with the
// appropriate hash function configured.
filters := filtersSchema.ZeroValue().(*schema.Set)
// We also need an appropriately-configured set for
// the list of values.
valuesSchema := filtersSchema.Elem.(*schema.Resource).Schema["values"]
valuesSet := func(vals ...string) *schema.Set {
ret := valuesSchema.ZeroValue().(*schema.Set)
for _, val := range vals {
ret.Add(val)
}
return ret
}
filters.Add(map[string]interface{}{
"name": "foo",
"values": valuesSet("bar", "baz"),
})
filters.Add(map[string]interface{}{
"name": "pizza",
"values": valuesSet("cheese"),
})
expected := []*ec2.Filter{
// These are produced in the deterministic order guaranteed
// by schema.Set.List(), which happens to produce them in
// the following order for our current input. If this test
// evolves with different input data in future then they
// will likely be emitted in a different order, which is fine.
{
Name: aws.String("pizza"),
Values: []*string{aws.String("cheese")},
},
{
Name: aws.String("foo"),
Values: []*string{aws.String("bar"), aws.String("baz")},
},
}
result := buildEC2CustomFilterList(filters)
if !reflect.DeepEqual(result, expected) {
t.Errorf(
"got %#v, but want %#v",
result, expected,
)
}
}

View File

@ -5,6 +5,7 @@ package aws
// It currently cannot be generated from the API json.
var hostedZoneIDsMap = map[string]string{
"us-east-1": "Z3AQBSTGFYJSTF",
"us-east-2": "Z2O1EMRO9K5GLX",
"us-west-2": "Z3BJ6K6RIION7M",
"us-west-1": "Z2F56UZL2M1ACD",
"eu-west-1": "Z1BKCTXD74EZPE",

View File

@ -1,12 +1,17 @@
package aws
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
)
func TestAccAWSCloudFrontDistribution_importBasic(t *testing.T) {
ri := acctest.RandInt()
testConfig := fmt.Sprintf(testAccAWSCloudFrontDistributionS3Config, ri, originBucket, logBucket, testAccAWSCloudFrontDistributionRetainConfig())
resourceName := "aws_cloudfront_distribution.s3_distribution"
resource.Test(t, resource.TestCase{
@ -15,7 +20,7 @@ func TestAccAWSCloudFrontDistribution_importBasic(t *testing.T) {
CheckDestroy: testAccCheckCloudFrontDistributionDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSCloudFrontDistributionS3Config,
Config: testConfig,
},
resource.TestStep{
ResourceName: resourceName,

View File

@ -0,0 +1,28 @@
package aws
import (
"testing"
"github.com/hashicorp/terraform/helper/resource"
)
func TestAccAWSDBEventSubscription_importBasic(t *testing.T) {
resourceName := "aws_db_event_subscription.bar"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSDBEventSubscriptionDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSDBEventSubscriptionConfig,
},
resource.TestStep{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
}

View File

@ -0,0 +1,37 @@
package aws
import (
"os"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
)
func TestAccAWSElasticacheReplicationGroup_importBasic(t *testing.T) {
oldvar := os.Getenv("AWS_DEFAULT_REGION")
os.Setenv("AWS_DEFAULT_REGION", "us-east-1")
defer os.Setenv("AWS_DEFAULT_REGION", oldvar)
name := acctest.RandString(10)
resourceName := "aws_elasticache_replication_group.bar"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSElasticacheReplicationGroupConfig(name),
},
resource.TestStep{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"apply_immediately"}, //not in the API
},
},
})
}

View File

@ -1,21 +1,25 @@
package aws
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
)
func TestAccAWSUser_importBasic(t *testing.T) {
resourceName := "aws_iam_user.user"
n := fmt.Sprintf("test-user-%d", acctest.RandInt())
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSUserDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSUserConfig,
Config: testAccAWSUserConfig(n, "/"),
},
resource.TestStep{

View File

@ -0,0 +1,31 @@
package aws
import (
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
)
func TestAccAWSOpsworksCustomLayerImportBasic(t *testing.T) {
name := acctest.RandString(10)
resourceName := "aws_opsworks_custom_layer.tf-acc"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAwsOpsworksCustomLayerDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAwsOpsworksCustomLayerConfigVpcCreate(name),
},
resource.TestStep{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
}

View File

@ -0,0 +1,31 @@
package aws
import (
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
)
func TestAccAWSOpsworksStackImportBasic(t *testing.T) {
name := acctest.RandString(10)
resourceName := "aws_opsworks_stack.tf-acc"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAwsOpsworksStackDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAwsOpsworksStackConfigVpcCreate(name),
},
resource.TestStep{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
}

View File

@ -40,6 +40,12 @@ func resourceAwsRouteTableImportState(
continue
}
if route.DestinationPrefixListId != nil {
// Skipping because VPC endpoint routes are handled separately
// See aws_vpc_endpoint
continue
}
// Minimal data for route
d := subResource.Data(nil)
d.SetType("aws_route")

View File

@ -35,3 +35,136 @@ func TestAccAWSRouteTable_importBasic(t *testing.T) {
},
})
}
func TestAccAWSRouteTable_complex(t *testing.T) {
checkFn := func(s []*terraform.InstanceState) error {
// Expect 3: the route table itself plus its 2 imported routes
if len(s) != 3 {
return fmt.Errorf("bad states: %#v", s)
}
return nil
}
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckRouteTableDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccRouteTableConfig_complexImport,
},
resource.TestStep{
ResourceName: "aws_route_table.mod",
ImportState: true,
ImportStateCheck: checkFn,
},
},
})
}
const testAccRouteTableConfig_complexImport = `
resource "aws_vpc" "default" {
cidr_block = "10.0.0.0/16"
enable_dns_hostnames = true
tags {
Name = "tf-rt-import-test"
}
}
resource "aws_subnet" "tf_test_subnet" {
vpc_id = "${aws_vpc.default.id}"
cidr_block = "10.0.0.0/24"
map_public_ip_on_launch = true
tags {
Name = "tf-rt-import-test"
}
}
resource "aws_eip" "nat" {
vpc = true
associate_with_private_ip = "10.0.0.10"
}
resource "aws_internet_gateway" "gw" {
vpc_id = "${aws_vpc.default.id}"
tags {
Name = "tf-rt-import-test"
}
}
variable "private_subnet_cidrs" {
default = "10.0.0.0/24"
}
resource "aws_nat_gateway" "nat" {
count = "${length(split(",", var.private_subnet_cidrs))}"
allocation_id = "${element(aws_eip.nat.*.id, count.index)}"
subnet_id = "${aws_subnet.tf_test_subnet.id}"
}
resource "aws_route_table" "mod" {
count = "${length(split(",", var.private_subnet_cidrs))}"
vpc_id = "${aws_vpc.default.id}"
tags {
Name = "tf-rt-import-test"
}
depends_on = ["aws_internet_gateway.ogw", "aws_internet_gateway.gw"]
}
resource "aws_route" "mod-1" {
route_table_id = "${aws_route_table.mod.id}"
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = "${element(aws_nat_gateway.nat.*.id, count.index)}"
}
resource "aws_route" "mod" {
route_table_id = "${aws_route_table.mod.id}"
destination_cidr_block = "10.181.0.0/16"
vpc_peering_connection_id = "${aws_vpc_peering_connection.foo.id}"
}
resource "aws_vpc_endpoint" "s3" {
vpc_id = "${aws_vpc.default.id}"
service_name = "com.amazonaws.us-west-2.s3"
route_table_ids = ["${aws_route_table.mod.*.id}"]
}
### vpc bar
resource "aws_vpc" "bar" {
cidr_block = "10.1.0.0/16"
tags {
Name = "tf-rt-import-test"
}
}
resource "aws_internet_gateway" "ogw" {
vpc_id = "${aws_vpc.bar.id}"
tags {
Name = "tf-rt-import-test"
}
}
### vpc peer connection
resource "aws_vpc_peering_connection" "foo" {
vpc_id = "${aws_vpc.default.id}"
peer_vpc_id = "${aws_vpc.bar.id}"
peer_owner_id = "187416307283"
tags {
Name = "tf-rt-import-test"
}
auto_accept = true
}
`

View File

@ -82,6 +82,8 @@ func protocolIntegers() map[string]int {
var protocolIntegers = make(map[string]int)
protocolIntegers = map[string]int{
// defined at https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
"ah": 51,
"esp": 50,
"udp": 17,
"tcp": 6,
"icmp": 1,

View File

@ -251,6 +251,9 @@ func (lt *opsworksLayerType) SchemaResource() *schema.Resource {
client := meta.(*AWSClient).opsworksconn
return lt.Delete(d, client)
},
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: resourceSchema,
}

View File

@ -144,15 +144,22 @@ func Provider() terraform.ResourceProvider {
DataSourcesMap: map[string]*schema.Resource{
"aws_ami": dataSourceAwsAmi(),
"aws_availability_zone": dataSourceAwsAvailabilityZone(),
"aws_availability_zones": dataSourceAwsAvailabilityZones(),
"aws_billing_service_account": dataSourceAwsBillingServiceAccount(),
"aws_caller_identity": dataSourceAwsCallerIdentity(),
"aws_cloudformation_stack": dataSourceAwsCloudFormationStack(),
"aws_ecs_container_definition": dataSourceAwsEcsContainerDefinition(),
"aws_elb_service_account": dataSourceAwsElbServiceAccount(),
"aws_iam_policy_document": dataSourceAwsIamPolicyDocument(),
"aws_ip_ranges": dataSourceAwsIPRanges(),
"aws_prefix_list": dataSourceAwsPrefixList(),
"aws_redshift_service_account": dataSourceAwsRedshiftServiceAccount(),
"aws_region": dataSourceAwsRegion(),
"aws_s3_bucket_object": dataSourceAwsS3BucketObject(),
"aws_subnet": dataSourceAwsSubnet(),
"aws_security_group": dataSourceAwsSecurityGroup(),
"aws_vpc": dataSourceAwsVpc(),
},
ResourcesMap: map[string]*schema.Resource{
@ -234,6 +241,8 @@ func Provider() terraform.ResourceProvider {
"aws_elastictranscoder_preset": resourceAwsElasticTranscoderPreset(),
"aws_elb": resourceAwsElb(),
"aws_elb_attachment": resourceAwsElbAttachment(),
"aws_emr_cluster": resourceAwsEMRCluster(),
"aws_emr_instance_group": resourceAwsEMRInstanceGroup(),
"aws_flow_log": resourceAwsFlowLog(),
"aws_glacier_vault": resourceAwsGlacierVault(),
"aws_iam_access_key": resourceAwsIamAccessKey(),
@ -254,6 +263,7 @@ func Provider() terraform.ResourceProvider {
"aws_iam_user_policy": resourceAwsIamUserPolicy(),
"aws_iam_user_ssh_key": resourceAwsIamUserSshKey(),
"aws_iam_user": resourceAwsIamUser(),
"aws_iam_user_login_profile": resourceAwsIamUserLoginProfile(),
"aws_instance": resourceAwsInstance(),
"aws_internet_gateway": resourceAwsInternetGateway(),
"aws_key_pair": resourceAwsKeyPair(),
@ -322,6 +332,7 @@ func Provider() terraform.ResourceProvider {
"aws_security_group": resourceAwsSecurityGroup(),
"aws_security_group_rule": resourceAwsSecurityGroupRule(),
"aws_simpledb_domain": resourceAwsSimpleDBDomain(),
"aws_ssm_activation": resourceAwsSsmActivation(),
"aws_ssm_association": resourceAwsSsmAssociation(),
"aws_ssm_document": resourceAwsSsmDocument(),
"aws_spot_datafeed_subscription": resourceAwsSpotDataFeedSubscription(),
@ -343,6 +354,13 @@ func Provider() terraform.ResourceProvider {
"aws_vpn_connection_route": resourceAwsVpnConnectionRoute(),
"aws_vpn_gateway": resourceAwsVpnGateway(),
"aws_vpn_gateway_attachment": resourceAwsVpnGatewayAttachment(),
"aws_waf_byte_match_set": resourceAwsWafByteMatchSet(),
"aws_waf_ipset": resourceAwsWafIPSet(),
"aws_waf_rule": resourceAwsWafRule(),
"aws_waf_size_constraint_set": resourceAwsWafSizeConstraintSet(),
"aws_waf_web_acl": resourceAwsWafWebAcl(),
"aws_waf_xss_match_set": resourceAwsWafXssMatchSet(),
"aws_waf_sql_injection_match_set": resourceAwsWafSqlInjectionMatchSet(),
},
ConfigureFunc: providerConfigure,
}

View File

@ -88,6 +88,11 @@ func resourceAwsAlb() *schema.Resource {
Type: schema.TypeString,
Optional: true,
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Default: true,
},
},
},
},
@ -229,6 +234,8 @@ func resourceAwsAlbRead(d *schema.ResourceData, meta interface{}) error {
accessLogMap := map[string]interface{}{}
for _, attr := range attributesResp.Attributes {
switch *attr.Key {
case "access_logs.s3.enabled":
accessLogMap["enabled"] = *attr.Value
case "access_logs.s3.bucket":
accessLogMap["bucket"] = *attr.Value
case "access_logs.s3.prefix":
@ -276,7 +283,7 @@ func resourceAwsAlbUpdate(d *schema.ResourceData, meta interface{}) error {
attributes = append(attributes,
&elbv2.LoadBalancerAttribute{
Key: aws.String("access_logs.s3.enabled"),
Value: aws.String("true"),
Value: aws.String(strconv.FormatBool(log["enabled"].(bool))),
},
&elbv2.LoadBalancerAttribute{
Key: aws.String("access_logs.s3.bucket"),

View File

@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"log"
"regexp"
"strconv"
"strings"
@ -30,6 +31,11 @@ func resourceAwsAlbTargetGroup() *schema.Resource {
Computed: true,
},
"arn_suffix": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Required: true,
@ -218,6 +224,7 @@ func resourceAwsAlbTargetGroupRead(d *schema.ResourceData, meta interface{}) err
targetGroup := resp.TargetGroups[0]
d.Set("arn", targetGroup.TargetGroupArn)
d.Set("arn_suffix", albTargetGroupSuffixFromARN(targetGroup.TargetGroupArn))
d.Set("name", targetGroup.TargetGroupName)
d.Set("port", targetGroup.Port)
d.Set("protocol", targetGroup.Protocol)
@ -463,3 +470,17 @@ func validateAwsAlbTargetGroupStickinessCookieDuration(v interface{}, k string)
}
return
}
func albTargetGroupSuffixFromARN(arn *string) string {
if arn == nil {
return ""
}
if arnComponents := regexp.MustCompile(`arn:.*:targetgroup/(.*)`).FindAllStringSubmatch(*arn, -1); len(arnComponents) == 1 {
if len(arnComponents[0]) == 2 {
return arnComponents[0][1]
}
}
return ""
}
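The arn_suffix computed here matches the TargetGroup dimension format that CloudWatch expects for AWS/ApplicationELB metrics. A rough sketch of consuming it in an alarm; the aws_alb_target_group.test name and the literal LoadBalancer suffix below are illustrative assumptions, not part of this diff:
resource "aws_cloudwatch_metric_alarm" "unhealthy_hosts" {
  alarm_name          = "alb-unhealthy-hosts"
  namespace           = "AWS/ApplicationELB"
  metric_name         = "UnHealthyHostCount"
  comparison_operator = "GreaterThanThreshold"
  threshold           = "0"
  evaluation_periods  = "1"
  period              = "60"
  statistic           = "Maximum"
  dimensions {
    TargetGroup  = "${aws_alb_target_group.test.arn_suffix}"
    # Placeholder load balancer suffix; derive it from the load balancer ARN.
    LoadBalancer = "app/my-alb/50dc6c495c0c9188"
  }
}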

View File

@ -13,6 +13,37 @@ import (
"github.com/hashicorp/terraform/terraform"
)
func TestALBTargetGroupCloudwatchSuffixFromARN(t *testing.T) {
cases := []struct {
name string
arn *string
suffix string
}{
{
name: "valid suffix",
arn: aws.String(`arn:aws:elasticloadbalancing:us-east-1:123456:targetgroup/my-targets/73e2d6bc24d8a067`),
suffix: `my-targets/73e2d6bc24d8a067`,
},
{
name: "no suffix",
arn: aws.String(`arn:aws:elasticloadbalancing:us-east-1:123456:targetgroup`),
suffix: ``,
},
{
name: "nil ARN",
arn: nil,
suffix: ``,
},
}
for _, tc := range cases {
actual := albTargetGroupSuffixFromARN(tc.arn)
if actual != tc.suffix {
t.Fatalf("bad suffix: %q\nExpected: %s\n Got: %s", tc.name, tc.suffix, actual)
}
}
}
func TestAccAWSALBTargetGroup_basic(t *testing.T) {
var conf elbv2.TargetGroup
targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum))

View File

@ -213,9 +213,8 @@ func TestAccAWSALB_accesslogs(t *testing.T) {
resource.TestCheckResourceAttrSet("aws_alb.alb_test", "arn"),
),
},
{
Config: testAccAWSALBConfig_accessLogs(albName, bucketName),
Config: testAccAWSALBConfig_accessLogs(true, albName, bucketName),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckAWSALBExists("aws_alb.alb_test", &conf),
resource.TestCheckResourceAttr("aws_alb.alb_test", "name", albName),
@ -232,6 +231,27 @@ func TestAccAWSALB_accesslogs(t *testing.T) {
resource.TestCheckResourceAttr("aws_alb.alb_test", "access_logs.#", "1"),
resource.TestCheckResourceAttr("aws_alb.alb_test", "access_logs.0.bucket", bucketName),
resource.TestCheckResourceAttr("aws_alb.alb_test", "access_logs.0.prefix", "testAccAWSALBConfig_accessLogs"),
resource.TestCheckResourceAttr("aws_alb.alb_test", "access_logs.0.enabled", "true"),
resource.TestCheckResourceAttrSet("aws_alb.alb_test", "arn"),
),
},
{
Config: testAccAWSALBConfig_accessLogs(false, albName, bucketName),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckAWSALBExists("aws_alb.alb_test", &conf),
resource.TestCheckResourceAttr("aws_alb.alb_test", "name", albName),
resource.TestCheckResourceAttr("aws_alb.alb_test", "internal", "false"),
resource.TestCheckResourceAttr("aws_alb.alb_test", "subnets.#", "2"),
resource.TestCheckResourceAttr("aws_alb.alb_test", "security_groups.#", "1"),
resource.TestCheckResourceAttr("aws_alb.alb_test", "tags.%", "1"),
resource.TestCheckResourceAttr("aws_alb.alb_test", "tags.TestName", "TestAccAWSALB_basic1"),
resource.TestCheckResourceAttr("aws_alb.alb_test", "enable_deletion_protection", "false"),
resource.TestCheckResourceAttr("aws_alb.alb_test", "idle_timeout", "50"),
resource.TestCheckResourceAttrSet("aws_alb.alb_test", "vpc_id"),
resource.TestCheckResourceAttrSet("aws_alb.alb_test", "zone_id"),
resource.TestCheckResourceAttrSet("aws_alb.alb_test", "dns_name"),
resource.TestCheckResourceAttr("aws_alb.alb_test", "access_logs.#", "1"),
resource.TestCheckResourceAttr("aws_alb.alb_test", "access_logs.0.enabled", "false"),
resource.TestCheckResourceAttrSet("aws_alb.alb_test", "arn"),
),
},
@ -569,7 +589,7 @@ resource "aws_security_group" "alb_test" {
}`, albName)
}
func testAccAWSALBConfig_accessLogs(albName, bucketName string) string {
func testAccAWSALBConfig_accessLogs(enabled bool, albName, bucketName string) string {
return fmt.Sprintf(`resource "aws_alb" "alb_test" {
name = "%s"
internal = false
@ -582,6 +602,7 @@ func testAccAWSALBConfig_accessLogs(albName, bucketName string) string {
access_logs {
bucket = "${aws_s3_bucket.logs.bucket}"
prefix = "${var.bucket_prefix}"
enabled = "%t"
}
tags {
@ -676,7 +697,7 @@ resource "aws_security_group" "alb_test" {
tags {
TestName = "TestAccAWSALB_basic"
}
}`, albName, bucketName)
}`, albName, enabled, bucketName)
}
func testAccAWSALBConfig_nosg(albName string) string {

View File

@ -121,6 +121,12 @@ func resourceAwsAmiRead(d *schema.ResourceData, meta interface{}) error {
res, err := client.DescribeImages(req)
if err != nil {
if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidAMIID.NotFound" {
log.Printf("[DEBUG] %s no longer exists, so we'll drop it from the state", id)
d.SetId("")
return nil
}
return err
}

View File

@ -52,6 +52,11 @@ func resourceAwsApiGatewayDomainName() *schema.Resource {
Computed: true,
},
"certificate_upload_date": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"cloudfront_zone_id": &schema.Schema{
Type: schema.TypeString,
Computed: true,

View File

@ -41,6 +41,9 @@ func TestAccAWSAPIGatewayDomainName_basic(t *testing.T) {
resource.TestCheckResourceAttr(
"aws_api_gateway_domain_name.test", "domain_name", name,
),
resource.TestCheckResourceAttrSet(
"aws_api_gateway_domain_name.test", "certificate_upload_date",
),
),
},
},

View File

@ -41,16 +41,9 @@ func resourceAwsApiGatewayIntegration() *schema.Resource {
},
"type": &schema.Schema{
Type: schema.TypeString,
Required: true,
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if value != "MOCK" && value != "AWS" && value != "HTTP" {
errors = append(errors, fmt.Errorf(
"%q must be one of 'AWS', 'MOCK', 'HTTP'", k))
}
return
},
Type: schema.TypeString,
Required: true,
ValidateFunc: validateApiGatewayIntegrationType,
},
"uri": &schema.Schema{

View File

@ -284,7 +284,7 @@ resource "aws_ecs_service" "service" {
name = "foobar"
cluster = "${aws_ecs_cluster.foo.id}"
task_definition = "${aws_ecs_task_definition.task.arn}"
desired_count = 1
desired_count = 2
deployment_maximum_percent = 200
deployment_minimum_healthy_percent = 50

View File

@ -218,7 +218,8 @@ func getAwsAutoscalingPutScalingPolicyInput(d *schema.ResourceData) (autoscaling
params.PolicyType = aws.String(v.(string))
}
if v, ok := d.GetOk("scaling_adjustment"); ok {
// if policy_type == "SimpleScaling", scaling_adjustment is required and 0 is allowed
if v, ok := d.GetOk("scaling_adjustment"); ok || *params.PolicyType == "SimpleScaling" {
params.ScalingAdjustment = aws.Int64(int64(v.(int)))
}
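
The guard added above works around a helper/schema quirk: GetOk reports ok == false whenever the stored value equals the type's zero value, so a SimpleScaling policy with scaling_adjustment = 0 would otherwise never be sent in PutScalingPolicy. A minimal, stdlib-only sketch of the same guard, assuming a plain map in place of *schema.ResourceData (getOk, simpleScalingGuard and attrs are illustrative names, not part of the provider):

package main

import "fmt"

// getOk mimics helper/schema's GetOk: it reports ok == false for zero values,
// which is exactly why a legitimate scaling_adjustment of 0 would be dropped.
func getOk(attrs map[string]int, key string) (int, bool) {
	v, present := attrs[key]
	return v, present && v != 0
}

// simpleScalingGuard mirrors the fix in the hunk: include the adjustment when
// getOk succeeds OR the policy type requires the field even at zero.
func simpleScalingGuard(attrs map[string]int, policyType string) (int, bool) {
	if v, ok := getOk(attrs, "scaling_adjustment"); ok || policyType == "SimpleScaling" {
		return v, true
	}
	return 0, false
}

func main() {
	attrs := map[string]int{"scaling_adjustment": 0}
	if v, send := simpleScalingGuard(attrs, "SimpleScaling"); send {
		fmt.Printf("PutScalingPolicy would include ScalingAdjustment=%d\n", v)
	}
}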

View File

@ -316,3 +316,62 @@ resource "aws_autoscaling_policy" "foobar_simple" {
}
`, name, name, name)
}
func TestAccAWSAutoscalingPolicy_SimpleScalingStepAdjustment(t *testing.T) {
var policy autoscaling.ScalingPolicy
name := acctest.RandString(5)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSAutoscalingPolicyDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSAutoscalingPolicyConfig_SimpleScalingStepAdjustment(name),
Check: resource.ComposeTestCheckFunc(
testAccCheckScalingPolicyExists("aws_autoscaling_policy.foobar_simple", &policy),
resource.TestCheckResourceAttr("aws_autoscaling_policy.foobar_simple", "adjustment_type", "ExactCapacity"),
resource.TestCheckResourceAttr("aws_autoscaling_policy.foobar_simple", "scaling_adjustment", "0"),
),
},
},
})
}
func testAccAWSAutoscalingPolicyConfig_SimpleScalingStepAdjustment(name string) string {
return fmt.Sprintf(`
resource "aws_launch_configuration" "foobar" {
name = "tf-test-%s"
image_id = "ami-21f78e11"
instance_type = "t1.micro"
}
resource "aws_autoscaling_group" "foobar" {
availability_zones = ["us-west-2a"]
name = "terraform-test-%s"
max_size = 5
min_size = 0
health_check_grace_period = 300
health_check_type = "ELB"
force_delete = true
termination_policies = ["OldestInstance"]
launch_configuration = "${aws_launch_configuration.foobar.name}"
tag {
key = "Foo"
value = "foo-bar"
propagate_at_launch = true
}
}
resource "aws_autoscaling_policy" "foobar_simple" {
name = "foobar_simple_%s"
adjustment_type = "ExactCapacity"
cooldown = 300
policy_type = "SimpleScaling"
scaling_adjustment = 0
autoscaling_group_name = "${aws_autoscaling_group.foobar.name}"
}
`, name, name, name)
}

View File

@ -113,11 +113,17 @@ func resourceAwsAutoscalingScheduleCreate(d *schema.ResourceData, meta interface
}
func resourceAwsAutoscalingScheduleRead(d *schema.ResourceData, meta interface{}) error {
sa, err := resourceAwsASGScheduledActionRetrieve(d, meta)
sa, err, exists := resourceAwsASGScheduledActionRetrieve(d, meta)
if err != nil {
return err
}
if !exists {
log.Printf("Error retrieving Autoscaling Scheduled Actions. Removing from state")
d.SetId("")
return nil
}
d.Set("autoscaling_group_name", sa.AutoScalingGroupName)
d.Set("arn", sa.ScheduledActionARN)
d.Set("desired_capacity", sa.DesiredCapacity)
@ -153,7 +159,7 @@ func resourceAwsAutoscalingScheduleDelete(d *schema.ResourceData, meta interface
return nil
}
func resourceAwsASGScheduledActionRetrieve(d *schema.ResourceData, meta interface{}) (*autoscaling.ScheduledUpdateGroupAction, error) {
func resourceAwsASGScheduledActionRetrieve(d *schema.ResourceData, meta interface{}) (*autoscaling.ScheduledUpdateGroupAction, error, bool) {
autoscalingconn := meta.(*AWSClient).autoscalingconn
params := &autoscaling.DescribeScheduledActionsInput{
@ -164,13 +170,13 @@ func resourceAwsASGScheduledActionRetrieve(d *schema.ResourceData, meta interfac
log.Printf("[INFO] Describing Autoscaling Scheduled Action: %+v", params)
actions, err := autoscalingconn.DescribeScheduledActions(params)
if err != nil {
return nil, fmt.Errorf("Error retrieving Autoscaling Scheduled Actions: %s", err)
return nil, fmt.Errorf("Error retrieving Autoscaling Scheduled Actions: %s", err), false
}
if len(actions.ScheduledUpdateGroupActions) != 1 ||
*actions.ScheduledUpdateGroupActions[0].ScheduledActionName != d.Id() {
return nil, fmt.Errorf("Unable to find Autoscaling Scheduled Action: %#v", actions.ScheduledUpdateGroupActions)
return nil, nil, false
}
return actions.ScheduledUpdateGroupActions[0], nil
return actions.ScheduledUpdateGroupActions[0], nil, true
}
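
The new (value, error, exists) return shape above lets the Read function tell an API failure apart from a scheduled action that was deleted outside of Terraform, so the resource can be dropped from state instead of failing the whole run. A hedged sketch of the same flow with plain types (retrieve, read and the map store are illustrative, and the value/error/bool ordering simply mirrors the hunk):

package main

import (
	"errors"
	"fmt"
)

// retrieve stands in for resourceAwsASGScheduledActionRetrieve: "not found" is
// reported via the bool, not via an error.
func retrieve(id string, store map[string]string) (string, error, bool) {
	if store == nil {
		return "", errors.New("describe call failed"), false
	}
	v, ok := store[id]
	if !ok {
		return "", nil, false
	}
	return v, nil, true
}

// read mirrors the Read flow: hard-fail on errors, but when the object simply
// no longer exists, log and clear the ID (i.e. remove it from state).
func read(id string, store map[string]string) error {
	v, err, exists := retrieve(id, store)
	if err != nil {
		return err
	}
	if !exists {
		fmt.Printf("[WARN] %s not found, removing from state\n", id)
		return nil
	}
	fmt.Printf("found %s = %s\n", id, v)
	return nil
}

func main() {
	_ = read("tf-test", map[string]string{})
}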

View File

@ -28,6 +28,40 @@ func TestAccAWSAutoscalingSchedule_basic(t *testing.T) {
})
}
func TestAccAWSAutoscalingSchedule_disappears(t *testing.T) {
var schedule autoscaling.ScheduledUpdateGroupAction
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSAutoscalingScheduleDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSAutoscalingScheduleConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckScalingScheduleExists("aws_autoscaling_schedule.foobar", &schedule),
testAccCheckScalingScheduleDisappears(&schedule),
),
ExpectNonEmptyPlan: true,
},
},
})
}
func testAccCheckScalingScheduleDisappears(schedule *autoscaling.ScheduledUpdateGroupAction) resource.TestCheckFunc {
return func(s *terraform.State) error {
autoscalingconn := testAccProvider.Meta().(*AWSClient).autoscalingconn
params := &autoscaling.DeleteScheduledActionInput{
AutoScalingGroupName: schedule.AutoScalingGroupName,
ScheduledActionName: schedule.ScheduledActionName,
}
if _, err := autoscalingconn.DeleteScheduledAction(params); err != nil {
return err
}
return nil
}
}
func TestAccAWSAutoscalingSchedule_recurrence(t *testing.T) {
var schedule autoscaling.ScheduledUpdateGroupAction
@ -87,6 +121,8 @@ func testAccCheckScalingScheduleExists(n string, policy *autoscaling.ScheduledUp
return fmt.Errorf("Scaling Schedule not found")
}
*policy = *resp.ScheduledUpdateGroupActions[0]
return nil
}
}

View File

@ -7,6 +7,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudfront"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
@ -26,56 +27,56 @@ func resourceAwsCloudFrontDistribution() *schema.Resource {
Type: schema.TypeString,
Computed: true,
},
"aliases": &schema.Schema{
"aliases": {
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: aliasesHash,
},
"cache_behavior": &schema.Schema{
"cache_behavior": {
Type: schema.TypeSet,
Optional: true,
Set: cacheBehaviorHash,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"allowed_methods": &schema.Schema{
"allowed_methods": {
Type: schema.TypeList,
Required: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"cached_methods": &schema.Schema{
"cached_methods": {
Type: schema.TypeList,
Required: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"compress": &schema.Schema{
"compress": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"default_ttl": &schema.Schema{
"default_ttl": {
Type: schema.TypeInt,
Required: true,
},
"forwarded_values": &schema.Schema{
"forwarded_values": {
Type: schema.TypeSet,
Required: true,
Set: forwardedValuesHash,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cookies": &schema.Schema{
"cookies": {
Type: schema.TypeSet,
Required: true,
Set: cookiePreferenceHash,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"forward": &schema.Schema{
"forward": {
Type: schema.TypeString,
Required: true,
},
"whitelisted_names": &schema.Schema{
"whitelisted_names": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
@ -83,16 +84,16 @@ func resourceAwsCloudFrontDistribution() *schema.Resource {
},
},
},
"headers": &schema.Schema{
"headers": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"query_string": &schema.Schema{
"query_string": {
Type: schema.TypeBool,
Required: true,
},
"query_string_cache_keys": &schema.Schema{
"query_string_cache_keys": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
@ -100,112 +101,112 @@ func resourceAwsCloudFrontDistribution() *schema.Resource {
},
},
},
"max_ttl": &schema.Schema{
"max_ttl": {
Type: schema.TypeInt,
Required: true,
},
"min_ttl": &schema.Schema{
"min_ttl": {
Type: schema.TypeInt,
Required: true,
},
"path_pattern": &schema.Schema{
"path_pattern": {
Type: schema.TypeString,
Required: true,
},
"smooth_streaming": &schema.Schema{
"smooth_streaming": {
Type: schema.TypeBool,
Optional: true,
},
"target_origin_id": &schema.Schema{
"target_origin_id": {
Type: schema.TypeString,
Required: true,
},
"trusted_signers": &schema.Schema{
"trusted_signers": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"viewer_protocol_policy": &schema.Schema{
"viewer_protocol_policy": {
Type: schema.TypeString,
Required: true,
},
},
},
},
"comment": &schema.Schema{
"comment": {
Type: schema.TypeString,
Optional: true,
},
"custom_error_response": &schema.Schema{
"custom_error_response": {
Type: schema.TypeSet,
Optional: true,
Set: customErrorResponseHash,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"error_caching_min_ttl": &schema.Schema{
"error_caching_min_ttl": {
Type: schema.TypeInt,
Optional: true,
},
"error_code": &schema.Schema{
"error_code": {
Type: schema.TypeInt,
Required: true,
},
"response_code": &schema.Schema{
"response_code": {
Type: schema.TypeInt,
Optional: true,
},
"response_page_path": &schema.Schema{
"response_page_path": {
Type: schema.TypeString,
Optional: true,
},
},
},
},
"default_cache_behavior": &schema.Schema{
"default_cache_behavior": {
Type: schema.TypeSet,
Required: true,
Set: defaultCacheBehaviorHash,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"allowed_methods": &schema.Schema{
"allowed_methods": {
Type: schema.TypeList,
Required: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"cached_methods": &schema.Schema{
"cached_methods": {
Type: schema.TypeList,
Required: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"compress": &schema.Schema{
"compress": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"default_ttl": &schema.Schema{
"default_ttl": {
Type: schema.TypeInt,
Required: true,
},
"forwarded_values": &schema.Schema{
"forwarded_values": {
Type: schema.TypeSet,
Required: true,
Set: forwardedValuesHash,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cookies": &schema.Schema{
"cookies": {
Type: schema.TypeSet,
Optional: true,
Set: cookiePreferenceHash,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"forward": &schema.Schema{
"forward": {
Type: schema.TypeString,
Required: true,
},
"whitelisted_names": &schema.Schema{
"whitelisted_names": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
@ -213,16 +214,16 @@ func resourceAwsCloudFrontDistribution() *schema.Resource {
},
},
},
"headers": &schema.Schema{
"headers": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"query_string": &schema.Schema{
"query_string": {
Type: schema.TypeBool,
Required: true,
},
"query_string_cache_keys": &schema.Schema{
"query_string_cache_keys": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
@ -230,65 +231,65 @@ func resourceAwsCloudFrontDistribution() *schema.Resource {
},
},
},
"max_ttl": &schema.Schema{
"max_ttl": {
Type: schema.TypeInt,
Required: true,
},
"min_ttl": &schema.Schema{
"min_ttl": {
Type: schema.TypeInt,
Required: true,
},
"smooth_streaming": &schema.Schema{
"smooth_streaming": {
Type: schema.TypeBool,
Optional: true,
},
"target_origin_id": &schema.Schema{
"target_origin_id": {
Type: schema.TypeString,
Required: true,
},
"trusted_signers": &schema.Schema{
"trusted_signers": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"viewer_protocol_policy": &schema.Schema{
"viewer_protocol_policy": {
Type: schema.TypeString,
Required: true,
},
},
},
},
"default_root_object": &schema.Schema{
"default_root_object": {
Type: schema.TypeString,
Optional: true,
},
"enabled": &schema.Schema{
"enabled": {
Type: schema.TypeBool,
Required: true,
},
"http_version": &schema.Schema{
"http_version": {
Type: schema.TypeString,
Optional: true,
Default: "http2",
ValidateFunc: validateHTTP,
},
"logging_config": &schema.Schema{
"logging_config": {
Type: schema.TypeSet,
Optional: true,
Set: loggingConfigHash,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"bucket": &schema.Schema{
"bucket": {
Type: schema.TypeString,
Required: true,
},
"include_cookies": &schema.Schema{
"include_cookies": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"prefix": &schema.Schema{
"prefix": {
Type: schema.TypeString,
Optional: true,
Default: "",
@ -296,13 +297,13 @@ func resourceAwsCloudFrontDistribution() *schema.Resource {
},
},
},
"origin": &schema.Schema{
"origin": {
Type: schema.TypeSet,
Required: true,
Set: originHash,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"custom_origin_config": &schema.Schema{
"custom_origin_config": {
Type: schema.TypeSet,
Optional: true,
ConflictsWith: []string{"origin.s3_origin_config"},
@ -310,19 +311,19 @@ func resourceAwsCloudFrontDistribution() *schema.Resource {
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"http_port": &schema.Schema{
"http_port": {
Type: schema.TypeInt,
Required: true,
},
"https_port": &schema.Schema{
"https_port": {
Type: schema.TypeInt,
Required: true,
},
"origin_protocol_policy": &schema.Schema{
"origin_protocol_policy": {
Type: schema.TypeString,
Required: true,
},
"origin_ssl_protocols": &schema.Schema{
"origin_ssl_protocols": {
Type: schema.TypeList,
Required: true,
Elem: &schema.Schema{Type: schema.TypeString},
@ -330,36 +331,36 @@ func resourceAwsCloudFrontDistribution() *schema.Resource {
},
},
},
"domain_name": &schema.Schema{
"domain_name": {
Type: schema.TypeString,
Required: true,
},
"custom_header": &schema.Schema{
"custom_header": {
Type: schema.TypeSet,
Optional: true,
Set: originCustomHeaderHash,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
},
"value": &schema.Schema{
"value": {
Type: schema.TypeString,
Required: true,
},
},
},
},
"origin_id": &schema.Schema{
"origin_id": {
Type: schema.TypeString,
Required: true,
},
"origin_path": &schema.Schema{
"origin_path": {
Type: schema.TypeString,
Optional: true,
},
"s3_origin_config": &schema.Schema{
"s3_origin_config": {
Type: schema.TypeSet,
Optional: true,
ConflictsWith: []string{"origin.custom_origin_config"},
@ -367,7 +368,7 @@ func resourceAwsCloudFrontDistribution() *schema.Resource {
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"origin_access_identity": &schema.Schema{
"origin_access_identity": {
Type: schema.TypeString,
Required: true,
},
@ -377,31 +378,31 @@ func resourceAwsCloudFrontDistribution() *schema.Resource {
},
},
},
"price_class": &schema.Schema{
"price_class": {
Type: schema.TypeString,
Optional: true,
Default: "PriceClass_All",
},
"restrictions": &schema.Schema{
"restrictions": {
Type: schema.TypeSet,
Required: true,
Set: restrictionsHash,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"geo_restriction": &schema.Schema{
"geo_restriction": {
Type: schema.TypeSet,
Required: true,
Set: geoRestrictionHash,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"locations": &schema.Schema{
"locations": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"restriction_type": &schema.Schema{
"restriction_type": {
Type: schema.TypeString,
Required: true,
},
@ -411,80 +412,80 @@ func resourceAwsCloudFrontDistribution() *schema.Resource {
},
},
},
"viewer_certificate": &schema.Schema{
"viewer_certificate": {
Type: schema.TypeSet,
Required: true,
Set: viewerCertificateHash,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"acm_certificate_arn": &schema.Schema{
"acm_certificate_arn": {
Type: schema.TypeString,
Optional: true,
ConflictsWith: []string{"viewer_certificate.cloudfront_default_certificate", "viewer_certificate.iam_certificate_id"},
},
"cloudfront_default_certificate": &schema.Schema{
"cloudfront_default_certificate": {
Type: schema.TypeBool,
Optional: true,
ConflictsWith: []string{"viewer_certificate.acm_certificate_arn", "viewer_certificate.iam_certificate_id"},
},
"iam_certificate_id": &schema.Schema{
"iam_certificate_id": {
Type: schema.TypeString,
Optional: true,
ConflictsWith: []string{"viewer_certificate.acm_certificate_arn", "viewer_certificate.cloudfront_default_certificate"},
},
"minimum_protocol_version": &schema.Schema{
"minimum_protocol_version": {
Type: schema.TypeString,
Optional: true,
Default: "SSLv3",
},
"ssl_support_method": &schema.Schema{
"ssl_support_method": {
Type: schema.TypeString,
Optional: true,
},
},
},
},
"web_acl_id": &schema.Schema{
"web_acl_id": {
Type: schema.TypeString,
Optional: true,
},
"caller_reference": &schema.Schema{
"caller_reference": {
Type: schema.TypeString,
Computed: true,
},
"status": &schema.Schema{
"status": {
Type: schema.TypeString,
Computed: true,
},
"active_trusted_signers": &schema.Schema{
"active_trusted_signers": {
Type: schema.TypeMap,
Computed: true,
},
"domain_name": &schema.Schema{
"domain_name": {
Type: schema.TypeString,
Computed: true,
},
"last_modified_time": &schema.Schema{
"last_modified_time": {
Type: schema.TypeString,
Computed: true,
},
"in_progress_validation_batches": &schema.Schema{
"in_progress_validation_batches": {
Type: schema.TypeInt,
Computed: true,
},
"etag": &schema.Schema{
"etag": {
Type: schema.TypeString,
Computed: true,
},
"hosted_zone_id": &schema.Schema{
"hosted_zone_id": {
Type: schema.TypeString,
Computed: true,
},
// retain_on_delete is a non-API attribute that may help facilitate speedy
// deletion of a resource. It's mainly here for testing purposes, so
// enable at your own risk.
"retain_on_delete": &schema.Schema{
"retain_on_delete": {
Type: schema.TypeBool,
Optional: true,
Default: false,
@ -542,17 +543,18 @@ func resourceAwsCloudFrontDistributionRead(d *schema.ResourceData, meta interfac
d.Set("etag", resp.ETag)
d.Set("arn", resp.Distribution.ARN)
cloudFrontArn := resp.Distribution.ARN
tagResp, tagErr := conn.ListTagsForResource(&cloudfront.ListTagsForResourceInput{
Resource: cloudFrontArn,
tagResp, err := conn.ListTagsForResource(&cloudfront.ListTagsForResourceInput{
Resource: aws.String(d.Get("arn").(string)),
})
if tagErr != nil {
log.Printf("[DEBUG] Error retrieving tags for ARN: %s", cloudFrontArn)
if err != nil {
return errwrap.Wrapf(fmt.Sprintf(
"Error retrieving EC2 tags for CloudFront Distribution %q (ARN: %q): {{err}}",
d.Id(), d.Get("arn").(string)), err)
}
if tagResp != nil {
d.Set("tags", tagsToMapCloudFront(tagResp.Tags))
if err := d.Set("tags", tagsToMapCloudFront(tagResp.Tags)); err != nil {
return err
}
return nil
@ -589,7 +591,7 @@ func resourceAwsCloudFrontDistributionDelete(d *schema.ResourceData, meta interf
// skip delete if retain_on_delete is enabled
if d.Get("retain_on_delete").(bool) {
log.Printf("[WARN] Removing Distributions ID %s with retain_on_delete set. Please delete this distribution manually.", d.Id())
log.Printf("[WARN] Removing CloudFront Distribution ID %q with `retain_on_delete` set. Please delete this distribution manually.", d.Id())
d.SetId("")
return nil
}
@ -643,7 +645,7 @@ func resourceAwsCloudFrontWebDistributionStateRefreshFunc(id string, meta interf
resp, err := conn.GetDistribution(params)
if err != nil {
log.Printf("Error on retrieving CloudFront distribution when waiting: %s", err)
log.Printf("[WARN] Error retrieving CloudFront Distribution %q details: %s", id, err)
return nil, "", err
}
@ -659,15 +661,11 @@ func resourceAwsCloudFrontWebDistributionStateRefreshFunc(id string, meta interf
// correct.
func validateHTTP(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
found := false
for _, w := range []string{"http1.1", "http2"} {
if value == w {
found = true
}
}
if found == false {
if value != "http1.1" && value != "http2" {
errors = append(errors, fmt.Errorf(
"HTTP version parameter must be one of http1.1 or http2"))
"%q contains an invalid HTTP version parameter %q. Valid parameters are either %q or %q.",
k, value, "http1.1", "http2"))
}
return
}

View File

@ -20,13 +20,15 @@ import (
// If you are testing manually and can't wait for deletion, set the
// TF_TEST_CLOUDFRONT_RETAIN environment variable.
func TestAccAWSCloudFrontDistribution_S3Origin(t *testing.T) {
ri := acctest.RandInt()
testConfig := fmt.Sprintf(testAccAWSCloudFrontDistributionS3Config, ri, originBucket, logBucket, testAccAWSCloudFrontDistributionRetainConfig())
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckCloudFrontDistributionDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSCloudFrontDistributionS3Config,
Config: testConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckCloudFrontDistributionExistence(
"aws_cloudfront_distribution.s3_distribution",
@ -44,8 +46,8 @@ func TestAccAWSCloudFrontDistribution_S3Origin(t *testing.T) {
func TestAccAWSCloudFrontDistribution_S3OriginWithTags(t *testing.T) {
ri := acctest.RandInt()
preConfig := fmt.Sprintf(testAccAWSCloudFrontDistributionS3ConfigWithTags, ri, testAccAWSCloudFrontDistributionRetainConfig())
postConfig := fmt.Sprintf(testAccAWSCloudFrontDistributionS3ConfigWithTagsUpdated, ri, testAccAWSCloudFrontDistributionRetainConfig())
preConfig := fmt.Sprintf(testAccAWSCloudFrontDistributionS3ConfigWithTags, ri, originBucket, logBucket, testAccAWSCloudFrontDistributionRetainConfig())
postConfig := fmt.Sprintf(testAccAWSCloudFrontDistributionS3ConfigWithTagsUpdated, ri, originBucket, logBucket, testAccAWSCloudFrontDistributionRetainConfig())
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -193,6 +195,23 @@ func TestAccAWSCloudFrontDistribution_noCustomErrorResponseConfig(t *testing.T)
})
}
func TestResourceAWSCloudFrontDistribution_validateHTTP(t *testing.T) {
var value string
var errors []error
value = "incorrect"
_, errors = validateHTTP(value, "http_version")
if len(errors) == 0 {
t.Fatalf("Expected %q to trigger a validation error", value)
}
value = "http1.1"
_, errors = validateHTTP(value, "http_version")
if len(errors) != 0 {
t.Fatalf("Expected %q not to trigger a validation error", value)
}
}
func testAccCheckCloudFrontDistributionDestroy(s *terraform.State) error {
for k, rs := range s.RootModule().Resources {
if rs.Type != "aws_cloudfront_distribution" {
@ -251,26 +270,41 @@ func testAccAWSCloudFrontDistributionRetainConfig() string {
return ""
}
var testAccAWSCloudFrontDistributionS3Config = fmt.Sprintf(`
var originBucket = fmt.Sprintf(`
resource "aws_s3_bucket" "s3_bucket_origin" {
bucket = "mybucket.${var.rand_id}"
acl = "public-read"
}
`)
var logBucket = fmt.Sprintf(`
resource "aws_s3_bucket" "s3_bucket_logs" {
bucket = "mylogs.${var.rand_id}"
acl = "public-read"
}
`)
var testAccAWSCloudFrontDistributionS3Config = `
variable rand_id {
default = %d
}
resource "aws_s3_bucket" "s3_bucket" {
bucket = "mybucket.${var.rand_id}.s3.amazonaws.com"
acl = "public-read"
}
# origin bucket
%s
# log bucket
%s
resource "aws_cloudfront_distribution" "s3_distribution" {
origin {
domain_name = "${aws_s3_bucket.s3_bucket.id}"
domain_name = "${aws_s3_bucket.s3_bucket_origin.id}.s3.amazonaws.com"
origin_id = "myS3Origin"
}
enabled = true
default_root_object = "index.html"
logging_config {
include_cookies = false
bucket = "mylogs.${var.rand_id}.s3.amazonaws.com"
bucket = "${aws_s3_bucket.s3_bucket_logs.id}.s3.amazonaws.com"
prefix = "myprefix"
}
aliases = [ "mysite.${var.rand_id}.example.com", "yoursite.${var.rand_id}.example.com" ]
@ -301,21 +335,22 @@ resource "aws_cloudfront_distribution" "s3_distribution" {
}
%s
}
`, rand.New(rand.NewSource(time.Now().UnixNano())).Int(), testAccAWSCloudFrontDistributionRetainConfig())
`
var testAccAWSCloudFrontDistributionS3ConfigWithTags = `
variable rand_id {
default = %d
}
resource "aws_s3_bucket" "s3_bucket" {
bucket = "mybucket.${var.rand_id}.s3.amazonaws.com"
acl = "public-read"
}
# origin bucket
%s
# log bucket
%s
resource "aws_cloudfront_distribution" "s3_distribution" {
origin {
domain_name = "${aws_s3_bucket.s3_bucket.id}"
domain_name = "${aws_s3_bucket.s3_bucket_origin.id}.s3.amazonaws.com"
origin_id = "myS3Origin"
}
enabled = true
@ -359,14 +394,15 @@ variable rand_id {
default = %d
}
resource "aws_s3_bucket" "s3_bucket" {
bucket = "mybucket.${var.rand_id}.s3.amazonaws.com"
acl = "public-read"
}
# origin bucket
%s
# log bucket
%s
resource "aws_cloudfront_distribution" "s3_distribution" {
origin {
domain_name = "${aws_s3_bucket.s3_bucket.id}"
domain_name = "${aws_s3_bucket.s3_bucket_origin.id}.s3.amazonaws.com"
origin_id = "myS3Origin"
}
enabled = true
@ -409,6 +445,9 @@ variable rand_id {
default = %d
}
# log bucket
%s
resource "aws_cloudfront_distribution" "custom_distribution" {
origin {
domain_name = "www.example.com"
@ -425,7 +464,7 @@ resource "aws_cloudfront_distribution" "custom_distribution" {
default_root_object = "index.html"
logging_config {
include_cookies = false
bucket = "mylogs.${var.rand_id}.s3.amazonaws.com"
bucket = "${aws_s3_bucket.s3_bucket_logs.id}.s3.amazonaws.com"
prefix = "myprefix"
}
aliases = [ "mysite.${var.rand_id}.example.com", "*.yoursite.${var.rand_id}.example.com" ]
@ -457,21 +496,22 @@ resource "aws_cloudfront_distribution" "custom_distribution" {
}
%s
}
`, rand.New(rand.NewSource(time.Now().UnixNano())).Int(), testAccAWSCloudFrontDistributionRetainConfig())
`, rand.New(rand.NewSource(time.Now().UnixNano())).Int(), logBucket, testAccAWSCloudFrontDistributionRetainConfig())
var testAccAWSCloudFrontDistributionMultiOriginConfig = fmt.Sprintf(`
variable rand_id {
default = %d
}
resource "aws_s3_bucket" "s3_bucket" {
bucket = "mybucket.${var.rand_id}.s3.amazonaws.com"
acl = "public-read"
}
# origin bucket
%s
# log bucket
%s
resource "aws_cloudfront_distribution" "multi_origin_distribution" {
origin {
domain_name = "${aws_s3_bucket.s3_bucket.id}"
domain_name = "${aws_s3_bucket.s3_bucket_origin.id}.s3.amazonaws.com"
origin_id = "myS3Origin"
}
origin {
@ -489,7 +529,7 @@ resource "aws_cloudfront_distribution" "multi_origin_distribution" {
default_root_object = "index.html"
logging_config {
include_cookies = false
bucket = "mylogs.${var.rand_id}.s3.amazonaws.com"
bucket = "${aws_s3_bucket.s3_bucket_logs.id}.s3.amazonaws.com"
prefix = "myprefix"
}
aliases = [ "mysite.${var.rand_id}.example.com", "*.yoursite.${var.rand_id}.example.com" ]
@ -541,6 +581,7 @@ resource "aws_cloudfront_distribution" "multi_origin_distribution" {
viewer_protocol_policy = "allow-all"
path_pattern = "images2/*.jpg"
}
price_class = "PriceClass_All"
custom_error_response {
error_code = 404
@ -558,7 +599,7 @@ resource "aws_cloudfront_distribution" "multi_origin_distribution" {
}
%s
}
`, rand.New(rand.NewSource(time.Now().UnixNano())).Int(), testAccAWSCloudFrontDistributionRetainConfig())
`, rand.New(rand.NewSource(time.Now().UnixNano())).Int(), originBucket, logBucket, testAccAWSCloudFrontDistributionRetainConfig())
var testAccAWSCloudFrontDistributionNoCustomErroResponseInfo = fmt.Sprintf(`
variable rand_id {

View File

@ -200,5 +200,40 @@ func resourceAwsCustomerGatewayDelete(d *schema.ResourceData, meta interface{})
}
}
gatewayFilter := &ec2.Filter{
Name: aws.String("customer-gateway-id"),
Values: []*string{aws.String(d.Id())},
}
err = resource.Retry(5*time.Minute, func() *resource.RetryError {
resp, err := conn.DescribeCustomerGateways(&ec2.DescribeCustomerGatewaysInput{
Filters: []*ec2.Filter{gatewayFilter},
})
if err != nil {
if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "InvalidCustomerGatewayID.NotFound" {
return nil
}
return resource.NonRetryableError(err)
}
if len(resp.CustomerGateways) != 1 {
return resource.RetryableError(fmt.Errorf("[ERROR] Error finding CustomerGateway for delete: %s", d.Id()))
}
switch *resp.CustomerGateways[0].State {
case "pending", "available", "deleting":
return resource.RetryableError(fmt.Errorf("[DEBUG] Gateway (%s) in state (%s), retrying", d.Id(), *resp.CustomerGateways[0].State))
case "deleted":
return nil
default:
return resource.RetryableError(fmt.Errorf("[DEBUG] Unrecognized state (%s) for Customer Gateway delete on (%s)", *resp.CustomerGateways[0].State, d.Id()))
}
})
if err != nil {
return err
}
return nil
}
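
The retry block above polls until the gateway actually reaches "deleted", treating the in-flight states as retryable and anything unexpected as an error once the timeout passes. A stdlib-only sketch of the same polling shape, without the helper/resource package (waitForDeleted and describe are illustrative):

package main

import (
	"fmt"
	"time"
)

// waitForDeleted polls a describe function until the object is gone or the
// deadline passes; transient states just trigger another poll.
func waitForDeleted(describe func() (string, bool, error), timeout, interval time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		state, found, err := describe()
		if err != nil {
			return err // non-retryable, e.g. a permissions failure
		}
		if !found || state == "deleted" {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("gateway still in state %q after %s", state, timeout)
		}
		time.Sleep(interval)
	}
}

func main() {
	states := []string{"available", "deleting", "deleted"}
	i := 0
	err := waitForDeleted(func() (string, bool, error) {
		s := states[i]
		if i < len(states)-1 {
			i++
		}
		return s, true, nil
	}, time.Second, 10*time.Millisecond)
	fmt.Println("wait finished, err =", err)
}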

View File

@ -18,6 +18,9 @@ func resourceAwsDbEventSubscription() *schema.Resource {
Read: resourceAwsDbEventSubscriptionRead,
Update: resourceAwsDbEventSubscriptionUpdate,
Delete: resourceAwsDbEventSubscriptionDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
@ -152,20 +155,23 @@ func resourceAwsDbEventSubscriptionRead(d *schema.ResourceData, meta interface{}
// list tags for resource
// set tags
conn := meta.(*AWSClient).rdsconn
arn := buildRDSEventSubscriptionARN(d.Get("customer_aws_id").(string), d.Id(), meta.(*AWSClient).region)
resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{
ResourceName: aws.String(arn),
})
if arn, err := buildRDSEventSubscriptionARN(d.Get("customer_aws_id").(string), d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).region); err != nil {
log.Printf("[DEBUG] Error building ARN for RDS Event Subscription, not setting Tags for Event Subscription %s", *sub.CustSubscriptionId)
} else {
resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{
ResourceName: aws.String(arn),
})
if err != nil {
log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
}
if err != nil {
log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
}
var dt []*rds.Tag
if len(resp.TagList) > 0 {
dt = resp.TagList
var dt []*rds.Tag
if len(resp.TagList) > 0 {
dt = resp.TagList
}
d.Set("tags", tagsToMapRDS(dt))
}
d.Set("tags", tagsToMapRDS(dt))
return nil
}
@ -258,11 +264,12 @@ func resourceAwsDbEventSubscriptionUpdate(d *schema.ResourceData, meta interface
d.SetPartial("source_type")
}
arn := buildRDSEventSubscriptionARN(d.Get("customer_aws_id").(string), d.Id(), meta.(*AWSClient).region)
if err := setTagsRDS(rdsconn, d, arn); err != nil {
return err
} else {
d.SetPartial("tags")
if arn, err := buildRDSEventSubscriptionARN(d.Get("customer_aws_id").(string), d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).region); err == nil {
if err := setTagsRDS(rdsconn, d, arn); err != nil {
return err
} else {
d.SetPartial("tags")
}
}
if d.HasChange("source_ids") {
@ -369,7 +376,10 @@ func resourceAwsDbEventSubscriptionRefreshFunc(
}
}
func buildRDSEventSubscriptionARN(customerAwsId, subscriptionId, region string) string {
arn := fmt.Sprintf("arn:aws:rds:%s:%s:es:%s", region, customerAwsId, subscriptionId)
return arn
func buildRDSEventSubscriptionARN(customerAwsId, subscriptionId, partition, region string) (string, error) {
if partition == "" {
return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS partition")
}
arn := fmt.Sprintf("arn:%s:rds:%s:%s:es:%s", partition, region, customerAwsId, subscriptionId)
return arn, nil
}
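
The pattern applied here (and in the other RDS ARN builders below) is to stop hard-coding the "aws" partition: in GovCloud the partition is "aws-us-gov" and in the China regions it is "aws-cn", so an ARN built with "arn:aws:..." is simply wrong there. A hedged sketch of the partition-aware builder (buildARN is an illustrative name):

package main

import (
	"errors"
	"fmt"
)

// buildARN inserts the partition as the second ARN segment instead of
// assuming the standard "aws" partition.
func buildARN(partition, region, accountID, resource string) (string, error) {
	if partition == "" {
		return "", errors.New("missing AWS partition")
	}
	if accountID == "" {
		return "", errors.New("missing AWS account ID")
	}
	return fmt.Sprintf("arn:%s:rds:%s:%s:%s", partition, region, accountID, resource), nil
}

func main() {
	arn, err := buildARN("aws-us-gov", "us-gov-west-1", "123456789012", "db:mydb")
	if err != nil {
		panic(err)
	}
	fmt.Println(arn) // arn:aws-us-gov:rds:us-gov-west-1:123456789012:db:mydb
}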

View File

@ -693,7 +693,7 @@ func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error {
// list tags for resource
// set tags
conn := meta.(*AWSClient).rdsconn
arn, err := buildRDSARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region)
arn, err := buildRDSARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region)
if err != nil {
name := "<empty>"
if v.DBName != nil && *v.DBName != "" {
@ -976,7 +976,7 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error
}
}
if arn, err := buildRDSARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil {
if arn, err := buildRDSARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil {
if err := setTagsRDS(conn, d, arn); err != nil {
return err
} else {
@ -1052,11 +1052,13 @@ func resourceAwsDbInstanceStateRefreshFunc(
}
}
func buildRDSARN(identifier, accountid, region string) (string, error) {
func buildRDSARN(identifier, partition, accountid, region string) (string, error) {
if partition == "" {
return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS partition")
}
if accountid == "" {
return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS Account ID")
}
arn := fmt.Sprintf("arn:aws:rds:%s:%s:db:%s", region, accountid, identifier)
arn := fmt.Sprintf("arn:%s:rds:%s:%s:db:%s", partition, region, accountid, identifier)
return arn, nil
}

View File

@ -350,7 +350,7 @@ func testAccCheckAWSDBInstanceSnapshot(s *terraform.State) error {
}
} else { // snapshot was found,
// verify we have the tags copied to the snapshot
instanceARN, err := buildRDSARN(snapshot_identifier, testAccProvider.Meta().(*AWSClient).accountid, testAccProvider.Meta().(*AWSClient).region)
instanceARN, err := buildRDSARN(snapshot_identifier, testAccProvider.Meta().(*AWSClient).partition, testAccProvider.Meta().(*AWSClient).accountid, testAccProvider.Meta().(*AWSClient).region)
// tags have a different ARN, just swapping :db: for :snapshot:
tagsARN := strings.Replace(instanceARN, ":db:", ":snapshot:", 1)
if err != nil {

View File

@ -167,7 +167,7 @@ func resourceAwsDbOptionGroupRead(d *schema.ResourceData, meta interface{}) erro
}
optionGroup := options.OptionGroupsList[0]
arn, err := buildRDSOptionGroupARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region)
arn, err := buildRDSOptionGroupARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region)
if err != nil {
name := "<empty>"
if optionGroup.OptionGroupName != nil && *optionGroup.OptionGroupName != "" {
@ -261,7 +261,7 @@ func resourceAwsDbOptionGroupUpdate(d *schema.ResourceData, meta interface{}) er
}
if arn, err := buildRDSOptionGroupARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil {
if arn, err := buildRDSOptionGroupARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil {
if err := setTagsRDS(rdsconn, d, arn); err != nil {
return err
} else {
@ -333,11 +333,14 @@ func resourceAwsDbOptionHash(v interface{}) int {
return hashcode.String(buf.String())
}
func buildRDSOptionGroupARN(identifier, accountid, region string) (string, error) {
func buildRDSOptionGroupARN(identifier, partition, accountid, region string) (string, error) {
if partition == "" {
return "", fmt.Errorf("Unable to construct RDS Option Group ARN because of missing AWS partition")
}
if accountid == "" {
return "", fmt.Errorf("Unable to construct RDS Option Group ARN because of missing AWS Account ID")
}
arn := fmt.Sprintf("arn:aws:rds:%s:%s:og:%s", region, accountid, identifier)
arn := fmt.Sprintf("arn:%s:rds:%s:%s:og:%s", partition, region, accountid, identifier)
return arn, nil
}

View File

@ -141,7 +141,7 @@ func resourceAwsDbParameterGroupRead(d *schema.ResourceData, meta interface{}) e
d.Set("parameter", flattenParameters(describeParametersResp.Parameters))
paramGroup := describeResp.DBParameterGroups[0]
arn, err := buildRDSPGARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region)
arn, err := buildRDSPGARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region)
if err != nil {
name := "<empty>"
if paramGroup.DBParameterGroupName != nil && *paramGroup.DBParameterGroupName != "" {
@ -217,7 +217,7 @@ func resourceAwsDbParameterGroupUpdate(d *schema.ResourceData, meta interface{})
}
}
if arn, err := buildRDSPGARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil {
if arn, err := buildRDSPGARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil {
if err := setTagsRDS(rdsconn, d, arn); err != nil {
return err
} else {
@ -278,11 +278,14 @@ func resourceAwsDbParameterHash(v interface{}) int {
return hashcode.String(buf.String())
}
func buildRDSPGARN(identifier, accountid, region string) (string, error) {
func buildRDSPGARN(identifier, partition, accountid, region string) (string, error) {
if partition == "" {
return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS partition")
}
if accountid == "" {
return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS Account ID")
}
arn := fmt.Sprintf("arn:aws:rds:%s:%s:pg:%s", region, accountid, identifier)
arn := fmt.Sprintf("arn:%s:rds:%s:%s:pg:%s", partition, region, accountid, identifier)
return arn, nil
}

View File

@ -176,7 +176,7 @@ func resourceAwsDbSecurityGroupRead(d *schema.ResourceData, meta interface{}) er
d.Set("ingress", rules)
conn := meta.(*AWSClient).rdsconn
arn, err := buildRDSSecurityGroupARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region)
arn, err := buildRDSSecurityGroupARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region)
if err != nil {
name := "<empty>"
if sg.DBSecurityGroupName != nil && *sg.DBSecurityGroupName != "" {
@ -207,7 +207,7 @@ func resourceAwsDbSecurityGroupUpdate(d *schema.ResourceData, meta interface{})
conn := meta.(*AWSClient).rdsconn
d.Partial(true)
if arn, err := buildRDSSecurityGroupARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil {
if arn, err := buildRDSSecurityGroupARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil {
if err := setTagsRDS(conn, d, arn); err != nil {
return err
} else {
@ -421,11 +421,14 @@ func resourceAwsDbSecurityGroupStateRefreshFunc(
}
}
func buildRDSSecurityGroupARN(identifier, accountid, region string) (string, error) {
func buildRDSSecurityGroupARN(identifier, partition, accountid, region string) (string, error) {
if partition == "" {
return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS partition")
}
if accountid == "" {
return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS Account ID")
}
arn := fmt.Sprintf("arn:aws:rds:%s:%s:secgrp:%s", region, accountid, identifier)
arn := fmt.Sprintf("arn:%s:rds:%s:%s:secgrp:%s", partition, region, accountid, identifier)
return arn, nil
}

View File

@ -130,7 +130,7 @@ func resourceAwsDbSubnetGroupRead(d *schema.ResourceData, meta interface{}) erro
// list tags for resource
// set tags
conn := meta.(*AWSClient).rdsconn
arn, err := buildRDSsubgrpARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region)
arn, err := buildRDSsubgrpARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region)
if err != nil {
log.Printf("[DEBUG] Error building ARN for DB Subnet Group, not setting Tags for group %s", *subnetGroup.DBSubnetGroupName)
} else {
@ -178,7 +178,7 @@ func resourceAwsDbSubnetGroupUpdate(d *schema.ResourceData, meta interface{}) er
}
}
if arn, err := buildRDSsubgrpARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil {
if arn, err := buildRDSsubgrpARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil {
if err := setTagsRDS(conn, d, arn); err != nil {
return err
} else {
@ -227,11 +227,14 @@ func resourceAwsDbSubnetGroupDeleteRefreshFunc(
}
}
func buildRDSsubgrpARN(identifier, accountid, region string) (string, error) {
func buildRDSsubgrpARN(identifier, partition, accountid, region string) (string, error) {
if partition == "" {
return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS partition")
}
if accountid == "" {
return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS Account ID")
}
arn := fmt.Sprintf("arn:aws:rds:%s:%s:subgrp:%s", region, accountid, identifier)
arn := fmt.Sprintf("arn:%s:rds:%s:%s:subgrp:%s", partition, region, accountid, identifier)
return arn, nil
}

View File

@ -336,7 +336,7 @@ func resourceAwsDirectoryServiceDirectoryCreate(d *schema.ResourceData, meta int
d.Id(), *ds.Stage)
return ds, *ds.Stage, nil
},
Timeout: 30 * time.Minute,
Timeout: 45 * time.Minute,
}
if _, err := stateConf.WaitForState(); err != nil {
return fmt.Errorf(

View File

@ -349,7 +349,7 @@ func resourceAwsEcsServiceDelete(d *schema.ResourceData, meta interface{}) error
wait := resource.StateChangeConf{
Pending: []string{"DRAINING"},
Target: []string{"INACTIVE"},
Timeout: 5 * time.Minute,
Timeout: 10 * time.Minute,
MinTimeout: 1 * time.Second,
Refresh: func() (interface{}, string, error) {
log.Printf("[DEBUG] Checking if ECS service %s is INACTIVE", d.Id())

View File

@ -27,22 +27,23 @@ func resourceAwsEfsFileSystem() *schema.Resource {
"creation_token": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
ValidateFunc: validateMaxLength(64),
},
"reference_name": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Deprecated: "Please use attribute `creation_token' instead. This attribute might be removed in future releases.",
ConflictsWith: []string{"creation_token"},
ValidateFunc: validateReferenceName,
Type: schema.TypeString,
Optional: true,
Computed: true,
Deprecated: "Please use attribute `creation_token' instead. This attribute might be removed in future releases.",
ValidateFunc: validateReferenceName,
},
"performance_mode": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
ValidateFunc: validatePerformanceModeType,
},
@ -161,6 +162,22 @@ func resourceAwsEfsFileSystemRead(d *schema.ResourceData, meta interface{}) erro
return err
}
var fs *efs.FileSystemDescription
for _, f := range resp.FileSystems {
if d.Id() == *f.FileSystemId {
fs = f
break
}
}
if fs == nil {
log.Printf("[WARN] EFS (%s) not found, removing from state", d.Id())
d.SetId("")
return nil
}
d.Set("creation_token", fs.CreationToken)
d.Set("performance_mode", fs.PerformanceMode)
return nil
}

View File

@ -90,6 +90,10 @@ func TestAccAWSEFSFileSystem_basic(t *testing.T) {
resource.TestStep{
Config: testAccAWSEFSFileSystemConfig,
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(
"aws_efs_file_system.foo",
"performance_mode",
"generalPurpose"),
testAccCheckEfsFileSystem(
"aws_efs_file_system.foo",
),
@ -278,13 +282,13 @@ func testAccCheckEfsFileSystemPerformanceMode(resourceID string, expectedMode st
const testAccAWSEFSFileSystemConfig = `
resource "aws_efs_file_system" "foo" {
reference_name = "radeksimko"
creation_token = "radeksimko"
}
`
const testAccAWSEFSFileSystemConfigWithTags = `
resource "aws_efs_file_system" "foo-with-tags" {
reference_name = "yada_yada"
creation_token = "yada_yada"
tags {
Name = "foo-efs"
Another = "tag"

View File

@ -29,6 +29,7 @@ func resourceAwsEip() *schema.Resource {
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
Computed: true,
},
"instance": &schema.Schema{
@ -168,7 +169,7 @@ func resourceAwsEipRead(d *schema.ResourceData, meta interface{}) error {
// On import (domain never set, which it must've been if we created),
// set the 'vpc' attribute depending on if we're in a VPC.
if _, ok := d.GetOk("domain"); !ok {
if address.Domain != nil {
d.Set("vpc", *address.Domain == "vpc")
}

View File

@ -2,6 +2,7 @@ package aws
import (
"fmt"
"os"
"strings"
"testing"
@ -12,6 +13,50 @@ import (
"github.com/hashicorp/terraform/terraform"
)
func TestAccAWSEIP_importEc2Classic(t *testing.T) {
oldvar := os.Getenv("AWS_DEFAULT_REGION")
os.Setenv("AWS_DEFAULT_REGION", "us-east-1")
defer os.Setenv("AWS_DEFAULT_REGION", oldvar)
resourceName := "aws_eip.bar"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSEIPDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSEIPInstanceEc2Classic,
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
}
func TestAccAWSEIP_importVpc(t *testing.T) {
resourceName := "aws_eip.bar"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSEIPDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSEIPNetworkInterfaceConfig,
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
}
func TestAccAWSEIP_basic(t *testing.T) {
var conf ec2.Address
@ -152,7 +197,7 @@ func testAccCheckAWSEIPDestroy(s *terraform.State) error {
describe, err := conn.DescribeAddresses(req)
if err != nil {
// Verify the error is what we want
if ae, ok := err.(awserr.Error); ok && ae.Code() == "InvalidAllocationID.NotFound" {
if ae, ok := err.(awserr.Error); ok && (ae.Code() == "InvalidAllocationID.NotFound" || ae.Code() == "InvalidAddress.NotFound") {
continue
}
return err
@ -168,7 +213,7 @@ func testAccCheckAWSEIPDestroy(s *terraform.State) error {
describe, err := conn.DescribeAddresses(req)
if err != nil {
// Verify the error is what we want
if ae, ok := err.(awserr.Error); ok && ae.Code() == "InvalidAllocationID.NotFound" {
if ae, ok := err.(awserr.Error); ok && (ae.Code() == "InvalidAllocationID.NotFound" || ae.Code() == "InvalidAddress.NotFound") {
continue
}
return err
@ -256,6 +301,20 @@ resource "aws_eip" "bar" {
}
`
const testAccAWSEIPInstanceEc2Classic = `
provider "aws" {
region = "us-east-1"
}
resource "aws_instance" "foo" {
ami = "ami-5469ae3c"
instance_type = "m1.small"
}
resource "aws_eip" "bar" {
instance = "${aws_instance.foo.id}"
}
`
const testAccAWSEIPInstanceConfig = `
resource "aws_instance" "foo" {
# us-west-2
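
The destroy-check hunks earlier in this file now match two error codes, which is why the condition needs parentheses: Go's && binds tighter than ||, so ok && a || b parses as (ok && a) || b and the right-hand Code() call would run even when the type assertion failed. A small sketch of the difference (errLike is an illustrative stand-in for awserr.Error):

package main

import "fmt"

type errLike struct{ code string }

func (e *errLike) Code() string { return e.code }

func main() {
	var ae *errLike // what a failed type assertion leaves you with
	ok := false

	// Safe: the parenthesized form short-circuits on ok, so Code() is never called.
	safe := ok && (ae.Code() == "InvalidAllocationID.NotFound" || ae.Code() == "InvalidAddress.NotFound")
	fmt.Println("safe form evaluated to", safe)

	// The unparenthesized form ok && ae.Code() == "..." || ae.Code() == "..."
	// would panic here: (ok && ...) is false, so the || branch still calls
	// Code() on the nil receiver.
}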

View File

@ -334,13 +334,15 @@ func resourceAwsElasticBeanstalkEnvironmentUpdate(d *schema.ResourceData, meta i
// Additions and removals of options are done in a single API call, so we
// can't do our normal "remove these" and then later "add these", re-adding
// any updated settings.
// Because of this, we need to remove any settings in the "removable"
// Because of this, we need to exclude any settings in the "removable"
// settings that are also found in the "add" settings, otherwise they
// conflict. Here we loop through all the initial removables from the set
// difference, and delete from the slice any items found in both `add` and
// `rm` above
// difference, and create a new slice `remove` that contains those settings
// found in `rm` but not in `add`
var remove []*elasticbeanstalk.ConfigurationOptionSetting
if len(add) > 0 {
for i, r := range rm {
for _, r := range rm {
var update = false
for _, a := range add {
// ResourceNames are optional. Some defaults come with it, some do
// not. We need to guard against nil/empty in state as well as
@ -354,14 +356,21 @@ func resourceAwsElasticBeanstalkEnvironmentUpdate(d *schema.ResourceData, meta i
}
}
if *r.Namespace == *a.Namespace && *r.OptionName == *a.OptionName {
log.Printf("[DEBUG] Removing Beanstalk setting: (%s::%s)", *a.Namespace, *a.OptionName)
rm = append(rm[:i], rm[i+1:]...)
log.Printf("[DEBUG] Updating Beanstalk setting (%s::%s) \"%s\" => \"%s\"", *a.Namespace, *a.OptionName, *r.Value, *a.Value)
update = true
break
}
}
// Only remove options that are not updates
if !update {
remove = append(remove, r)
}
}
} else {
remove = rm
}
for _, elem := range rm {
for _, elem := range remove {
updateOpts.OptionsToRemove = append(updateOpts.OptionsToRemove, &elasticbeanstalk.OptionSpecification{
Namespace: elem.Namespace,
OptionName: elem.OptionName,

View File

@ -235,14 +235,14 @@ func TestAccAWSBeanstalkEnv_basic_settings_update(t *testing.T) {
Config: testAccBeanstalkEnvConfig_settings(rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.tfenvtest", &app),
testAccVerifyBeanstalkConfig(&app, []string{"TF_LOG", "TF_SOME_VAR"}),
testAccVerifyBeanstalkConfig(&app, []string{"ENV_STATIC", "ENV_UPDATE"}),
),
},
resource.TestStep{
Config: testAccBeanstalkEnvConfig_settings_update(rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.tfenvtest", &app),
testAccVerifyBeanstalkConfig(&app, []string{"TF_LOG", "TF_SOME_VAR"}),
testAccVerifyBeanstalkConfig(&app, []string{"ENV_STATIC", "ENV_UPDATE"}),
),
},
resource.TestStep{
@ -280,7 +280,7 @@ func testAccVerifyBeanstalkConfig(env *elasticbeanstalk.EnvironmentDescription,
cs := resp.ConfigurationSettings[0]
var foundEnvs []string
testStrings := []string{"TF_LOG", "TF_SOME_VAR"}
testStrings := []string{"ENV_STATIC", "ENV_UPDATE"}
for _, os := range cs.OptionSettings {
for _, k := range testStrings {
if *os.OptionName == k {
@ -504,13 +504,19 @@ resource "aws_elastic_beanstalk_environment" "tfenvtest" {
setting {
namespace = "aws:elasticbeanstalk:application:environment"
name = "TF_LOG"
name = "ENV_STATIC"
value = "true"
}
setting {
namespace = "aws:elasticbeanstalk:application:environment"
name = "TF_SOME_VAR"
name = "ENV_UPDATE"
value = "true"
}
setting {
namespace = "aws:elasticbeanstalk:application:environment"
name = "ENV_REMOVE"
value = "true"
}
@ -553,19 +559,19 @@ resource "aws_elastic_beanstalk_environment" "tfenvtest" {
setting {
namespace = "aws:elasticbeanstalk:application:environment"
name = "TF_LOG"
name = "ENV_STATIC"
value = "true"
}
setting {
namespace = "aws:elasticbeanstalk:application:environment"
name = "TF_SOME_VAR"
name = "ENV_UPDATE"
value = "false"
}
setting {
setting {
namespace = "aws:elasticbeanstalk:application:environment"
name = "TF_SOME_NEW_VAR"
name = "ENV_ADD"
value = "true"
}

View File

@ -384,7 +384,7 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{})
}
// list tags for resource
// set tags
arn, err := buildECARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region)
arn, err := buildECARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region)
if err != nil {
log.Printf("[DEBUG] Error building ARN for ElastiCache Cluster, not setting Tags for cluster %s", *c.CacheClusterId)
} else {
@ -409,7 +409,7 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{})
func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).elasticacheconn
arn, err := buildECARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region)
arn, err := buildECARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region)
if err != nil {
log.Printf("[DEBUG] Error building ARN for ElastiCache Cluster, not updating Tags for cluster %s", d.Id())
} else {
@ -661,11 +661,14 @@ func cacheClusterStateRefreshFunc(conn *elasticache.ElastiCache, clusterID, give
}
}
func buildECARN(identifier, accountid, region string) (string, error) {
func buildECARN(identifier, partition, accountid, region string) (string, error) {
if partition == "" {
return "", fmt.Errorf("Unable to construct ElastiCache ARN because of missing AWS partition")
}
if accountid == "" {
return "", fmt.Errorf("Unable to construct ElastiCache ARN because of missing AWS Account ID")
}
arn := fmt.Sprintf("arn:aws:elasticache:%s:%s:cluster:%s", region, accountid, identifier)
arn := fmt.Sprintf("arn:%s:elasticache:%s:%s:cluster:%s", partition, region, accountid, identifier)
return arn, nil
}

View File

@ -295,7 +295,7 @@ resource "aws_elasticache_cluster" "bar" {
node_type = "cache.m1.small"
num_cache_nodes = 1
port = 6379
parameter_group_name = "default.redis2.8"
parameter_group_name = "default.redis3.2"
security_group_names = ["${aws_elasticache_security_group.bar.name}"]
snapshot_window = "05:00-09:00"
snapshot_retention_limit = 3
@ -329,7 +329,7 @@ resource "aws_elasticache_cluster" "bar" {
node_type = "cache.m1.small"
num_cache_nodes = 1
port = 6379
parameter_group_name = "default.redis2.8"
parameter_group_name = "default.redis3.2"
security_group_names = ["${aws_elasticache_security_group.bar.name}"]
snapshot_window = "07:00-09:00"
snapshot_retention_limit = 7

View File

@ -31,6 +31,12 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource {
Default: false,
}
resourceSchema["auto_minor_version_upgrade"] = &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: true,
}
resourceSchema["replication_group_description"] = &schema.Schema{
Type: schema.TypeString,
Required: true,
@ -47,6 +53,11 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource {
Computed: true,
}
resourceSchema["configuration_endpoint_address"] = &schema.Schema{
Type: schema.TypeString,
Computed: true,
}
resourceSchema["engine"].Required = false
resourceSchema["engine"].Optional = true
resourceSchema["engine"].Default = "redis"
@ -57,6 +68,9 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource {
Read: resourceAwsElasticacheReplicationGroupRead,
Update: resourceAwsElasticacheReplicationGroupUpdate,
Delete: resourceAwsElasticacheReplicationGroupDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: resourceSchema,
}
@ -70,6 +84,7 @@ func resourceAwsElasticacheReplicationGroupCreate(d *schema.ResourceData, meta i
ReplicationGroupId: aws.String(d.Get("replication_group_id").(string)),
ReplicationGroupDescription: aws.String(d.Get("replication_group_description").(string)),
AutomaticFailoverEnabled: aws.Bool(d.Get("automatic_failover_enabled").(bool)),
AutoMinorVersionUpgrade: aws.Bool(d.Get("auto_minor_version_upgrade").(bool)),
CacheNodeType: aws.String(d.Get("node_type").(string)),
Engine: aws.String(d.Get("engine").(string)),
Port: aws.Int64(int64(d.Get("port").(int))),
@ -192,7 +207,17 @@ func resourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta int
return nil
}
d.Set("automatic_failover_enabled", rgp.AutomaticFailover)
if rgp.AutomaticFailover != nil {
switch strings.ToLower(*rgp.AutomaticFailover) {
case "disabled", "disabling":
d.Set("automatic_failover_enabled", false)
case "enabled", "enabling":
d.Set("automatic_failover_enabled", true)
default:
log.Printf("Unknown AutomaticFailover state %s", *rgp.AutomaticFailover)
}
}
d.Set("replication_group_description", rgp.Description)
d.Set("number_cache_clusters", len(rgp.MemberClusters))
d.Set("replication_group_id", rgp.ReplicationGroupId)
@ -217,15 +242,26 @@ func resourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta int
d.Set("engine", c.Engine)
d.Set("engine_version", c.EngineVersion)
d.Set("subnet_group_name", c.CacheSubnetGroupName)
d.Set("security_group_names", c.CacheSecurityGroups)
d.Set("security_group_ids", c.SecurityGroups)
d.Set("parameter_group_name", c.CacheParameterGroup)
d.Set("security_group_names", flattenElastiCacheSecurityGroupNames(c.CacheSecurityGroups))
d.Set("security_group_ids", flattenElastiCacheSecurityGroupIds(c.SecurityGroups))
if c.CacheParameterGroup != nil {
d.Set("parameter_group_name", c.CacheParameterGroup.CacheParameterGroupName)
}
d.Set("maintenance_window", c.PreferredMaintenanceWindow)
d.Set("snapshot_window", c.SnapshotWindow)
d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit)
d.Set("snapshot_window", rgp.SnapshotWindow)
d.Set("snapshot_retention_limit", rgp.SnapshotRetentionLimit)
d.Set("primary_endpoint_address", rgp.NodeGroups[0].PrimaryEndpoint.Address)
if rgp.ConfigurationEndpoint != nil {
d.Set("port", rgp.ConfigurationEndpoint.Port)
d.Set("configuration_endpoint_address", rgp.ConfigurationEndpoint.Address)
} else {
d.Set("port", rgp.NodeGroups[0].PrimaryEndpoint.Port)
d.Set("primary_endpoint_address", rgp.NodeGroups[0].PrimaryEndpoint.Address)
}
d.Set("auto_minor_version_upgrade", c.AutoMinorVersionUpgrade)
}
return nil
@ -250,6 +286,11 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i
requestUpdate = true
}
if d.HasChange("auto_minor_version_upgrade") {
params.AutoMinorVersionUpgrade = aws.Bool(d.Get("auto_minor_version_upgrade").(bool))
requestUpdate = true
}
if d.HasChange("security_group_ids") {
if attr := d.Get("security_group_ids").(*schema.Set); attr.Len() > 0 {
params.SecurityGroupIds = expandStringList(attr.List())
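
In the replication group Read above, the API reports AutomaticFailover as a lifecycle string while the schema stores a bool, so the switch also maps the transitional "enabling"/"disabling" values; otherwise the attribute flaps and produces spurious diffs. A minimal sketch of that normalization (normalizeFailover is an illustrative name):

package main

import (
	"fmt"
	"strings"
)

// normalizeFailover maps the API's lifecycle string onto the boolean the
// schema stores; the second return value reports whether the state was known.
func normalizeFailover(status string) (bool, bool) {
	switch strings.ToLower(status) {
	case "enabled", "enabling":
		return true, true
	case "disabled", "disabling":
		return false, true
	default:
		return false, false // unknown state, leave the attribute untouched
	}
}

func main() {
	for _, s := range []string{"ENABLED", "disabling", "unknown"} {
		v, ok := normalizeFailover(s)
		fmt.Printf("%-10s -> value=%v known=%v\n", s, v, ok)
	}
}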

View File

@ -26,6 +26,8 @@ func TestAccAWSElasticacheReplicationGroup_basic(t *testing.T) {
testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg),
resource.TestCheckResourceAttr(
"aws_elasticache_replication_group.bar", "number_cache_clusters", "2"),
resource.TestCheckResourceAttr(
"aws_elasticache_replication_group.bar", "auto_minor_version_upgrade", "false"),
),
},
},
@ -48,6 +50,8 @@ func TestAccAWSElasticacheReplicationGroup_updateDescription(t *testing.T) {
"aws_elasticache_replication_group.bar", "number_cache_clusters", "2"),
resource.TestCheckResourceAttr(
"aws_elasticache_replication_group.bar", "replication_group_description", "test description"),
resource.TestCheckResourceAttr(
"aws_elasticache_replication_group.bar", "auto_minor_version_upgrade", "false"),
),
},
@ -59,6 +63,8 @@ func TestAccAWSElasticacheReplicationGroup_updateDescription(t *testing.T) {
"aws_elasticache_replication_group.bar", "number_cache_clusters", "2"),
resource.TestCheckResourceAttr(
"aws_elasticache_replication_group.bar", "replication_group_description", "updated description"),
resource.TestCheckResourceAttr(
"aws_elasticache_replication_group.bar", "auto_minor_version_upgrade", "true"),
),
},
},
@ -112,7 +118,7 @@ func TestAccAWSElasticacheReplicationGroup_updateParameterGroup(t *testing.T) {
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg),
resource.TestCheckResourceAttr(
"aws_elasticache_replication_group.bar", "parameter_group_name", "default.redis2.8"),
"aws_elasticache_replication_group.bar", "parameter_group_name", "default.redis3.2"),
),
},
@ -141,6 +147,8 @@ func TestAccAWSElasticacheReplicationGroup_vpc(t *testing.T) {
testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg),
resource.TestCheckResourceAttr(
"aws_elasticache_replication_group.bar", "number_cache_clusters", "1"),
resource.TestCheckResourceAttr(
"aws_elasticache_replication_group.bar", "auto_minor_version_upgrade", "false"),
),
},
},
@ -162,6 +170,39 @@ func TestAccAWSElasticacheReplicationGroup_multiAzInVpc(t *testing.T) {
"aws_elasticache_replication_group.bar", "number_cache_clusters", "2"),
resource.TestCheckResourceAttr(
"aws_elasticache_replication_group.bar", "automatic_failover_enabled", "true"),
resource.TestCheckResourceAttr(
"aws_elasticache_replication_group.bar", "snapshot_window", "02:00-03:00"),
resource.TestCheckResourceAttr(
"aws_elasticache_replication_group.bar", "snapshot_retention_limit", "7"),
resource.TestCheckResourceAttrSet(
"aws_elasticache_replication_group.bar", "primary_endpoint_address"),
),
},
},
})
}
func TestAccAWSElasticacheReplicationGroup_redisClusterInVpc2(t *testing.T) {
var rg elasticache.ReplicationGroup
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSElasticacheReplicationGroupRedisClusterInVPCConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg),
resource.TestCheckResourceAttr(
"aws_elasticache_replication_group.bar", "number_cache_clusters", "2"),
resource.TestCheckResourceAttr(
"aws_elasticache_replication_group.bar", "automatic_failover_enabled", "true"),
resource.TestCheckResourceAttr(
"aws_elasticache_replication_group.bar", "snapshot_window", "02:00-03:00"),
resource.TestCheckResourceAttr(
"aws_elasticache_replication_group.bar", "snapshot_retention_limit", "7"),
resource.TestCheckResourceAttrSet(
"aws_elasticache_replication_group.bar", "configuration_endpoint_address"),
),
},
},
@ -317,9 +358,10 @@ resource "aws_elasticache_replication_group" "bar" {
node_type = "cache.m1.small"
number_cache_clusters = 2
port = 6379
parameter_group_name = "default.redis2.8"
parameter_group_name = "default.redis3.2"
security_group_names = ["${aws_elasticache_security_group.bar.name}"]
apply_immediately = true
auto_minor_version_upgrade = false
}`, rName, rName, rName)
}
@ -347,7 +389,7 @@ resource "aws_elasticache_security_group" "bar" {
resource "aws_elasticache_parameter_group" "bar" {
name = "allkeys-lru"
family = "redis2.8"
family = "redis3.2"
parameter {
name = "maxmemory-policy"
@ -395,9 +437,10 @@ resource "aws_elasticache_replication_group" "bar" {
node_type = "cache.m1.small"
number_cache_clusters = 2
port = 6379
parameter_group_name = "default.redis2.8"
parameter_group_name = "default.redis3.2"
security_group_names = ["${aws_elasticache_security_group.bar.name}"]
apply_immediately = true
auto_minor_version_upgrade = true
}`, rName, rName, rName)
}
@ -429,7 +472,7 @@ resource "aws_elasticache_replication_group" "bar" {
node_type = "cache.m1.medium"
number_cache_clusters = 2
port = 6379
parameter_group_name = "default.redis2.8"
parameter_group_name = "default.redis3.2"
security_group_names = ["${aws_elasticache_security_group.bar.name}"]
apply_immediately = true
}`, rName, rName, rName)
@ -478,8 +521,9 @@ resource "aws_elasticache_replication_group" "bar" {
port = 6379
subnet_group_name = "${aws_elasticache_subnet_group.bar.name}"
security_group_ids = ["${aws_security_group.bar.id}"]
parameter_group_name = "default.redis2.8"
parameter_group_name = "default.redis3.2"
availability_zones = ["us-west-2a"]
auto_minor_version_upgrade = false
}
`, acctest.RandInt(), acctest.RandInt(), acctest.RandString(10))
@ -539,8 +583,75 @@ resource "aws_elasticache_replication_group" "bar" {
port = 6379
subnet_group_name = "${aws_elasticache_subnet_group.bar.name}"
security_group_ids = ["${aws_security_group.bar.id}"]
parameter_group_name = "default.redis2.8"
parameter_group_name = "default.redis3.2"
availability_zones = ["us-west-2a","us-west-2b"]
automatic_failover_enabled = true
snapshot_window = "02:00-03:00"
snapshot_retention_limit = 7
}
`, acctest.RandInt(), acctest.RandInt(), acctest.RandInt(), acctest.RandInt(), acctest.RandString(10))
var testAccAWSElasticacheReplicationGroupRedisClusterInVPCConfig = fmt.Sprintf(`
resource "aws_vpc" "foo" {
cidr_block = "192.168.0.0/16"
tags {
Name = "tf-test"
}
}
resource "aws_subnet" "foo" {
vpc_id = "${aws_vpc.foo.id}"
cidr_block = "192.168.0.0/20"
availability_zone = "us-west-2a"
tags {
Name = "tf-test-%03d"
}
}
resource "aws_subnet" "bar" {
vpc_id = "${aws_vpc.foo.id}"
cidr_block = "192.168.16.0/20"
availability_zone = "us-west-2b"
tags {
Name = "tf-test-%03d"
}
}
resource "aws_elasticache_subnet_group" "bar" {
name = "tf-test-cache-subnet-%03d"
description = "tf-test-cache-subnet-group-descr"
subnet_ids = [
"${aws_subnet.foo.id}",
"${aws_subnet.bar.id}"
]
}
resource "aws_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
vpc_id = "${aws_vpc.foo.id}"
ingress {
from_port = -1
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_elasticache_replication_group" "bar" {
replication_group_id = "tf-%s"
replication_group_description = "test description"
node_type = "cache.t2.micro"
number_cache_clusters = "2"
port = 6379
subnet_group_name = "${aws_elasticache_subnet_group.bar.name}"
security_group_ids = ["${aws_security_group.bar.id}"]
parameter_group_name = "default.redis3.2.cluster.on"
availability_zones = ["us-west-2a","us-west-2b"]
automatic_failover_enabled = true
snapshot_window = "02:00-03:00"
snapshot_retention_limit = 7
engine_version = "3.2.4"
maintenance_window = "thu:03:00-thu:04:00"
}
`, acctest.RandInt(), acctest.RandInt(), acctest.RandInt(), acctest.RandInt(), acctest.RandString(10))

View File

@ -0,0 +1,668 @@
package aws
import (
"log"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/emr"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsEMRCluster() *schema.Resource {
return &schema.Resource{
Create: resourceAwsEMRClusterCreate,
Read: resourceAwsEMRClusterRead,
Update: resourceAwsEMRClusterUpdate,
Delete: resourceAwsEMRClusterDelete,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
ForceNew: true,
Required: true,
},
"release_label": &schema.Schema{
Type: schema.TypeString,
ForceNew: true,
Required: true,
},
"master_instance_type": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"core_instance_type": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Computed: true,
},
"core_instance_count": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Default: 0,
},
"cluster_state": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"log_uri": &schema.Schema{
Type: schema.TypeString,
ForceNew: true,
Optional: true,
},
"master_public_dns": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"applications": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"ec2_attributes": &schema.Schema{
Type: schema.TypeList,
MaxItems: 1,
Optional: true,
ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"key_name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"subnet_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"additional_master_security_groups": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"additional_slave_security_groups": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"emr_managed_master_security_group": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"emr_managed_slave_security_group": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"instance_profile": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
},
},
},
"bootstrap_action": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"path": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"args": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
},
},
},
"tags": tagsSchema(),
"configurations": &schema.Schema{
Type: schema.TypeString,
ForceNew: true,
Optional: true,
},
"service_role": &schema.Schema{
Type: schema.TypeString,
ForceNew: true,
Required: true,
},
"visible_to_all_users": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
Default: true,
},
},
}
}
func resourceAwsEMRClusterCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).emrconn
log.Printf("[DEBUG] Creating EMR cluster")
masterInstanceType := d.Get("master_instance_type").(string)
coreInstanceType := masterInstanceType
if v, ok := d.GetOk("core_instance_type"); ok {
coreInstanceType = v.(string)
}
coreInstanceCount := d.Get("core_instance_count").(int)
applications := d.Get("applications").(*schema.Set).List()
instanceConfig := &emr.JobFlowInstancesConfig{
MasterInstanceType: aws.String(masterInstanceType),
SlaveInstanceType: aws.String(coreInstanceType),
InstanceCount: aws.Int64(int64(coreInstanceCount)),
// Default values that we can open up in the future
KeepJobFlowAliveWhenNoSteps: aws.Bool(true),
TerminationProtected: aws.Bool(false),
}
var instanceProfile string
if a, ok := d.GetOk("ec2_attributes"); ok {
ec2Attributes := a.([]interface{})
attributes := ec2Attributes[0].(map[string]interface{})
if v, ok := attributes["key_name"]; ok {
instanceConfig.Ec2KeyName = aws.String(v.(string))
}
if v, ok := attributes["subnet_id"]; ok {
instanceConfig.Ec2SubnetId = aws.String(v.(string))
}
if v, ok := attributes["subnet_id"]; ok {
instanceConfig.Ec2SubnetId = aws.String(v.(string))
}
if v, ok := attributes["additional_master_security_groups"]; ok {
strSlice := strings.Split(v.(string), ",")
for i, s := range strSlice {
strSlice[i] = strings.TrimSpace(s)
}
instanceConfig.AdditionalMasterSecurityGroups = aws.StringSlice(strSlice)
}
if v, ok := attributes["additional_slave_security_groups"]; ok {
strSlice := strings.Split(v.(string), ",")
for i, s := range strSlice {
strSlice[i] = strings.TrimSpace(s)
}
instanceConfig.AdditionalSlaveSecurityGroups = aws.StringSlice(strSlice)
}
if v, ok := attributes["emr_managed_master_security_group"]; ok {
instanceConfig.EmrManagedMasterSecurityGroup = aws.String(v.(string))
}
if v, ok := attributes["emr_managed_slave_security_group"]; ok {
instanceConfig.EmrManagedSlaveSecurityGroup = aws.String(v.(string))
}
if len(strings.TrimSpace(attributes["instance_profile"].(string))) != 0 {
instanceProfile = strings.TrimSpace(attributes["instance_profile"].(string))
}
}
emrApps := expandApplications(applications)
params := &emr.RunJobFlowInput{
Instances: instanceConfig,
Name: aws.String(d.Get("name").(string)),
Applications: emrApps,
ReleaseLabel: aws.String(d.Get("release_label").(string)),
ServiceRole: aws.String(d.Get("service_role").(string)),
VisibleToAllUsers: aws.Bool(d.Get("visible_to_all_users").(bool)),
}
if v, ok := d.GetOk("log_uri"); ok {
params.LogUri = aws.String(v.(string))
}
if instanceProfile != "" {
params.JobFlowRole = aws.String(instanceProfile)
}
if v, ok := d.GetOk("bootstrap_action"); ok {
bootstrapActions := v.(*schema.Set).List()
params.BootstrapActions = expandBootstrapActions(bootstrapActions)
}
if v, ok := d.GetOk("tags"); ok {
tagsIn := v.(map[string]interface{})
params.Tags = expandTags(tagsIn)
}
if v, ok := d.GetOk("configurations"); ok {
confUrl := v.(string)
params.Configurations = expandConfigures(confUrl)
}
log.Printf("[DEBUG] EMR Cluster create options: %s", params)
resp, err := conn.RunJobFlow(params)
if err != nil {
log.Printf("[ERROR] %s", err)
return err
}
d.SetId(*resp.JobFlowId)
log.Println(
"[INFO] Waiting for EMR Cluster to be available")
stateConf := &resource.StateChangeConf{
Pending: []string{"STARTING", "BOOTSTRAPPING"},
Target: []string{"WAITING", "RUNNING"},
Refresh: resourceAwsEMRClusterStateRefreshFunc(d, meta),
Timeout: 40 * time.Minute,
MinTimeout: 10 * time.Second,
Delay: 30 * time.Second, // Wait 30 secs before starting
}
_, err = stateConf.WaitForState()
if err != nil {
return fmt.Errorf("[WARN] Error waiting for EMR Cluster state to be \"WAITING\" or \"RUNNING\": %s", err)
}
return resourceAwsEMRClusterRead(d, meta)
}
func resourceAwsEMRClusterRead(d *schema.ResourceData, meta interface{}) error {
emrconn := meta.(*AWSClient).emrconn
req := &emr.DescribeClusterInput{
ClusterId: aws.String(d.Id()),
}
resp, err := emrconn.DescribeCluster(req)
if err != nil {
return fmt.Errorf("Error reading EMR cluster: %s", err)
}
if resp.Cluster == nil {
log.Printf("[DEBUG] EMR Cluster (%s) not found", d.Id())
d.SetId("")
return nil
}
cluster := resp.Cluster
if cluster.Status != nil {
if *cluster.Status.State == "TERMINATED" {
log.Printf("[DEBUG] EMR Cluster (%s) was TERMINATED already", d.Id())
d.SetId("")
return nil
}
if *cluster.Status.State == "TERMINATED_WITH_ERRORS" {
log.Printf("[DEBUG] EMR Cluster (%s) was TERMINATED_WITH_ERRORS already", d.Id())
d.SetId("")
return nil
}
d.Set("cluster_state", cluster.Status.State)
}
instanceGroups, err := fetchAllEMRInstanceGroups(meta, d.Id())
if err == nil {
coreGroup := findGroup(instanceGroups, "CORE")
if coreGroup != nil {
d.Set("core_instance_type", coreGroup.InstanceType)
}
}
d.Set("name", cluster.Name)
d.Set("service_role", cluster.ServiceRole)
d.Set("release_label", cluster.ReleaseLabel)
d.Set("log_uri", cluster.LogUri)
d.Set("master_public_dns", cluster.MasterPublicDnsName)
d.Set("visible_to_all_users", cluster.VisibleToAllUsers)
d.Set("tags", tagsToMapEMR(cluster.Tags))
if err := d.Set("applications", flattenApplications(cluster.Applications)); err != nil {
log.Printf("[ERR] Error setting EMR Applications for cluster (%s): %s", d.Id(), err)
}
// Configurations is a JSON document. It is expanded into API structs on
// create, but on read it should be returned to state as a plain JSON string.
if err := d.Set("configurations", cluster.Configurations); err != nil {
log.Printf("[ERR] Error setting EMR configurations for cluster (%s): %s", d.Id(), err)
}
if err := d.Set("ec2_attributes", flattenEc2Attributes(cluster.Ec2InstanceAttributes)); err != nil {
log.Printf("[ERR] Error setting EMR Ec2 Attributes: %s", err)
}
return nil
}
func resourceAwsEMRClusterUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).emrconn
if d.HasChange("core_instance_count") {
log.Printf("[DEBUG] Modify EMR cluster")
groups, err := fetchAllEMRInstanceGroups(meta, d.Id())
if err != nil {
log.Printf("[DEBUG] Error finding all instance groups: %s", err)
return err
}
coreInstanceCount := d.Get("core_instance_count").(int)
coreGroup := findGroup(groups, "CORE")
if coreGroup == nil {
return fmt.Errorf("[ERR] Error finding core group")
}
params := &emr.ModifyInstanceGroupsInput{
InstanceGroups: []*emr.InstanceGroupModifyConfig{
{
InstanceGroupId: coreGroup.Id,
InstanceCount: aws.Int64(int64(coreInstanceCount)),
},
},
}
_, errModify := conn.ModifyInstanceGroups(params)
if errModify != nil {
log.Printf("[ERROR] %s", errModify)
return errModify
}
log.Printf("[DEBUG] Modify EMR Cluster done...")
}
log.Println(
"[INFO] Waiting for EMR Cluster to be available")
stateConf := &resource.StateChangeConf{
Pending: []string{"STARTING", "BOOTSTRAPPING"},
Target: []string{"WAITING", "RUNNING"},
Refresh: resourceAwsEMRClusterStateRefreshFunc(d, meta),
Timeout: 40 * time.Minute,
MinTimeout: 10 * time.Second,
Delay: 5 * time.Second,
}
_, err := stateConf.WaitForState()
if err != nil {
return fmt.Errorf("[WARN] Error waiting for EMR Cluster state to be \"WAITING\" or \"RUNNING\" after modification: %s", err)
}
return resourceAwsEMRClusterRead(d, meta)
}
func resourceAwsEMRClusterDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).emrconn
req := &emr.TerminateJobFlowsInput{
JobFlowIds: []*string{
aws.String(d.Id()),
},
}
_, err := conn.TerminateJobFlows(req)
if err != nil {
log.Printf("[ERROR], %s", err)
return err
}
err = resource.Retry(10*time.Minute, func() *resource.RetryError {
resp, err := conn.ListInstances(&emr.ListInstancesInput{
ClusterId: aws.String(d.Id()),
})
if err != nil {
return resource.NonRetryableError(err)
}
// Guard against a nil response before counting instances
if resp == nil || len(resp.Instances) == 0 {
log.Printf("[DEBUG] No instances found for EMR Cluster (%s)", d.Id())
return nil
}
instanceCount := len(resp.Instances)
// Collect instance status states, wait for all instances to be terminated
// before moving on
var terminated []string
for j, i := range resp.Instances {
if i.Status != nil {
if *i.Status.State == "TERMINATED" {
terminated = append(terminated, *i.Ec2InstanceId)
}
} else {
log.Printf("[DEBUG] Cluster instance (%d : %s) has no status", j, *i.Ec2InstanceId)
}
}
if len(terminated) == instanceCount {
log.Printf("[DEBUG] All (%d) EMR Cluster (%s) Instances terminated", instanceCount, d.Id())
return nil
}
return resource.RetryableError(fmt.Errorf("[DEBUG] EMR Cluster (%s) has (%d) Instances remaining, retrying", d.Id(), len(resp.Instances)))
})
if err != nil {
log.Printf("[ERR] Error waiting for EMR Cluster (%s) Instances to drain", d.Id())
}
d.SetId("")
return nil
}
func expandApplications(apps []interface{}) []*emr.Application {
appOut := make([]*emr.Application, 0, len(apps))
for _, appName := range expandStringList(apps) {
app := &emr.Application{
Name: appName,
}
appOut = append(appOut, app)
}
return appOut
}
func flattenApplications(apps []*emr.Application) []interface{} {
appOut := make([]interface{}, 0, len(apps))
for _, app := range apps {
appOut = append(appOut, *app.Name)
}
return appOut
}
func flattenEc2Attributes(ia *emr.Ec2InstanceAttributes) []map[string]interface{} {
attrs := map[string]interface{}{}
result := make([]map[string]interface{}, 0)
if ia.Ec2KeyName != nil {
attrs["key_name"] = *ia.Ec2KeyName
}
if ia.Ec2SubnetId != nil {
attrs["subnet_id"] = *ia.Ec2SubnetId
}
if ia.IamInstanceProfile != nil {
attrs["instance_profile"] = *ia.IamInstanceProfile
}
if ia.EmrManagedMasterSecurityGroup != nil {
attrs["emr_managed_master_security_group"] = *ia.EmrManagedMasterSecurityGroup
}
if ia.EmrManagedSlaveSecurityGroup != nil {
attrs["emr_managed_slave_security_group"] = *ia.EmrManagedSlaveSecurityGroup
}
if len(ia.AdditionalMasterSecurityGroups) > 0 {
strs := aws.StringValueSlice(ia.AdditionalMasterSecurityGroups)
attrs["additional_master_security_groups"] = strings.Join(strs, ",")
}
if len(ia.AdditionalSlaveSecurityGroups) > 0 {
strs := aws.StringValueSlice(ia.AdditionalSlaveSecurityGroups)
attrs["additional_slave_security_groups"] = strings.Join(strs, ",")
}
result = append(result, attrs)
return result
}
func loadGroups(d *schema.ResourceData, meta interface{}) ([]*emr.InstanceGroup, error) {
emrconn := meta.(*AWSClient).emrconn
reqGrps := &emr.ListInstanceGroupsInput{
ClusterId: aws.String(d.Id()),
}
respGrps, errGrps := emrconn.ListInstanceGroups(reqGrps)
if errGrps != nil {
return nil, fmt.Errorf("Error reading EMR cluster: %s", errGrps)
}
return respGrps.InstanceGroups, nil
}
func findGroup(grps []*emr.InstanceGroup, typ string) *emr.InstanceGroup {
for _, grp := range grps {
if grp.InstanceGroupType != nil {
if *grp.InstanceGroupType == typ {
return grp
}
}
}
return nil
}
func expandTags(m map[string]interface{}) []*emr.Tag {
var result []*emr.Tag
for k, v := range m {
result = append(result, &emr.Tag{
Key: aws.String(k),
Value: aws.String(v.(string)),
})
}
return result
}
func tagsToMapEMR(ts []*emr.Tag) map[string]string {
result := make(map[string]string)
for _, t := range ts {
result[*t.Key] = *t.Value
}
return result
}
func expandBootstrapActions(bootstrapActions []interface{}) []*emr.BootstrapActionConfig {
actionsOut := []*emr.BootstrapActionConfig{}
for _, raw := range bootstrapActions {
actionAttributes := raw.(map[string]interface{})
actionName := actionAttributes["name"].(string)
actionPath := actionAttributes["path"].(string)
actionArgs := actionAttributes["args"].(*schema.Set).List()
action := &emr.BootstrapActionConfig{
Name: aws.String(actionName),
ScriptBootstrapAction: &emr.ScriptBootstrapActionConfig{
Path: aws.String(actionPath),
Args: expandStringList(actionArgs),
},
}
actionsOut = append(actionsOut, action)
}
return actionsOut
}
func expandConfigures(input string) []*emr.Configuration {
configsOut := []*emr.Configuration{}
if strings.HasPrefix(input, "http") {
if err := readHttpJson(input, &configsOut); err != nil {
log.Printf("[ERR] Error reading HTTP JSON: %s", err)
}
} else if strings.HasSuffix(input, ".json") {
if err := readLocalJson(input, &configsOut); err != nil {
log.Printf("[ERR] Error reading local JSON: %s", err)
}
} else {
if err := readBodyJson(input, &configsOut); err != nil {
log.Printf("[ERR] Error reading body JSON: %s", err)
}
}
log.Printf("[DEBUG] Expanded EMR Configurations %s", configsOut)
return configsOut
}
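// For illustration only (hypothetical values, not part of the schema docs),
// the three input forms handled above correspond to configurations such as:
//   configurations = "https://example.com/emr_configurations.json"       // fetched over HTTP
//   configurations = "test-fixtures/emr_configurations.json"             // read from local disk
//   configurations = "[{\"Classification\": \"spark-env\"}]"             // parsed as inline JSON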
func readHttpJson(url string, target interface{}) error {
r, err := http.Get(url)
if err != nil {
return err
}
defer r.Body.Close()
return json.NewDecoder(r.Body).Decode(target)
}
func readLocalJson(localFile string, target interface{}) error {
file, e := ioutil.ReadFile(localFile)
if e != nil {
log.Printf("[ERROR] %s", e)
return e
}
return json.Unmarshal(file, target)
}
func readBodyJson(body string, target interface{}) error {
log.Printf("[DEBUG] Raw Body %s\n", body)
err := json.Unmarshal([]byte(body), target)
if err != nil {
log.Printf("[ERROR] parsing JSON %s", err)
return err
}
return nil
}
func resourceAwsEMRClusterStateRefreshFunc(d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
conn := meta.(*AWSClient).emrconn
log.Printf("[INFO] Reading EMR Cluster Information: %s", d.Id())
params := &emr.DescribeClusterInput{
ClusterId: aws.String(d.Id()),
}
resp, err := conn.DescribeCluster(params)
if err != nil {
if awsErr, ok := err.(awserr.Error); ok {
if "ClusterNotFound" == awsErr.Code() {
return 42, "destroyed", nil
}
}
log.Printf("[WARN] Error on retrieving EMR Cluster (%s) when waiting: %s", d.Id(), err)
return nil, "", err
}
emrc := resp.Cluster
if emrc == nil {
return 42, "destroyed", nil
}
if resp.Cluster.Status != nil {
log.Printf("[DEBUG] EMR Cluster status (%s): %s", d.Id(), *resp.Cluster.Status)
}
return emrc, *emrc.Status.State, nil
}
}

View File

@ -0,0 +1,373 @@
package aws
import (
"fmt"
"log"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/emr"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAWSEMRCluster_basic(t *testing.T) {
var jobFlow emr.RunJobFlowOutput
r := acctest.RandInt()
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSEmrDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSEmrClusterConfig(r),
Check: testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &jobFlow),
},
},
})
}
func testAccCheckAWSEmrDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).emrconn
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_emr_cluster" {
continue
}
params := &emr.DescribeClusterInput{
ClusterId: aws.String(rs.Primary.ID),
}
describe, err := conn.DescribeCluster(params)
if err == nil {
if describe.Cluster != nil &&
*describe.Cluster.Status.State == "WAITING" {
return fmt.Errorf("EMR Cluster still exists")
}
}
providerErr, ok := err.(awserr.Error)
if !ok {
return err
}
log.Printf("[ERROR] %v", providerErr)
}
return nil
}
func testAccCheckAWSEmrClusterExists(n string, v *emr.RunJobFlowOutput) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No cluster id set")
}
conn := testAccProvider.Meta().(*AWSClient).emrconn
describe, err := conn.DescribeCluster(&emr.DescribeClusterInput{
ClusterId: aws.String(rs.Primary.ID),
})
if err != nil {
return fmt.Errorf("EMR error: %v", err)
}
if describe.Cluster != nil &&
*describe.Cluster.Id != rs.Primary.ID {
return fmt.Errorf("EMR cluser not found")
}
if describe.Cluster != nil &&
*describe.Cluster.Status.State != "WAITING" {
return fmt.Errorf("EMR cluser is not up yet")
}
return nil
}
}
func testAccAWSEmrClusterConfig(r int) string {
return fmt.Sprintf(`
provider "aws" {
region = "us-west-2"
}
resource "aws_emr_cluster" "tf-test-cluster" {
name = "emr-test-%d"
release_label = "emr-4.6.0"
applications = ["Spark"]
ec2_attributes {
subnet_id = "${aws_subnet.main.id}"
emr_managed_master_security_group = "${aws_security_group.allow_all.id}"
emr_managed_slave_security_group = "${aws_security_group.allow_all.id}"
instance_profile = "${aws_iam_instance_profile.emr_profile.arn}"
}
master_instance_type = "m3.xlarge"
core_instance_type = "m3.xlarge"
core_instance_count = 1
tags {
role = "rolename"
dns_zone = "env_zone"
env = "env"
name = "name-env"
}
bootstrap_action {
path = "s3://elasticmapreduce/bootstrap-actions/run-if"
name = "runif"
args = ["instance.isMaster=true", "echo running on master node"]
}
configurations = "test-fixtures/emr_configurations.json"
depends_on = ["aws_main_route_table_association.a"]
service_role = "${aws_iam_role.iam_emr_default_role.arn}"
}
resource "aws_security_group" "allow_all" {
name = "allow_all"
description = "Allow all inbound traffic"
vpc_id = "${aws_vpc.main.id}"
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
depends_on = ["aws_subnet.main"]
lifecycle {
ignore_changes = ["ingress", "egress"]
}
tags {
name = "emr_test"
}
}
resource "aws_vpc" "main" {
cidr_block = "168.31.0.0/16"
enable_dns_hostnames = true
tags {
name = "emr_test"
}
}
resource "aws_subnet" "main" {
vpc_id = "${aws_vpc.main.id}"
cidr_block = "168.31.0.0/20"
tags {
name = "emr_test"
}
}
resource "aws_internet_gateway" "gw" {
vpc_id = "${aws_vpc.main.id}"
}
resource "aws_route_table" "r" {
vpc_id = "${aws_vpc.main.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.gw.id}"
}
}
resource "aws_main_route_table_association" "a" {
vpc_id = "${aws_vpc.main.id}"
route_table_id = "${aws_route_table.r.id}"
}
###
# IAM things
###
# IAM role for EMR Service
resource "aws_iam_role" "iam_emr_default_role" {
name = "iam_emr_default_role_%d"
assume_role_policy = <<EOT
{
"Version": "2008-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "elasticmapreduce.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
EOT
}
resource "aws_iam_role_policy_attachment" "service-attach" {
role = "${aws_iam_role.iam_emr_default_role.id}"
policy_arn = "${aws_iam_policy.iam_emr_default_policy.arn}"
}
resource "aws_iam_policy" "iam_emr_default_policy" {
name = "iam_emr_default_policy_%d"
policy = <<EOT
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Resource": "*",
"Action": [
"ec2:AuthorizeSecurityGroupEgress",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CancelSpotInstanceRequests",
"ec2:CreateNetworkInterface",
"ec2:CreateSecurityGroup",
"ec2:CreateTags",
"ec2:DeleteNetworkInterface",
"ec2:DeleteSecurityGroup",
"ec2:DeleteTags",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeAccountAttributes",
"ec2:DescribeDhcpOptions",
"ec2:DescribeInstanceStatus",
"ec2:DescribeInstances",
"ec2:DescribeKeyPairs",
"ec2:DescribeNetworkAcls",
"ec2:DescribeNetworkInterfaces",
"ec2:DescribePrefixLists",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSpotInstanceRequests",
"ec2:DescribeSpotPriceHistory",
"ec2:DescribeSubnets",
"ec2:DescribeVpcAttribute",
"ec2:DescribeVpcEndpoints",
"ec2:DescribeVpcEndpointServices",
"ec2:DescribeVpcs",
"ec2:DetachNetworkInterface",
"ec2:ModifyImageAttribute",
"ec2:ModifyInstanceAttribute",
"ec2:RequestSpotInstances",
"ec2:RevokeSecurityGroupEgress",
"ec2:RunInstances",
"ec2:TerminateInstances",
"ec2:DeleteVolume",
"ec2:DescribeVolumeStatus",
"ec2:DescribeVolumes",
"ec2:DetachVolume",
"iam:GetRole",
"iam:GetRolePolicy",
"iam:ListInstanceProfiles",
"iam:ListRolePolicies",
"iam:PassRole",
"s3:CreateBucket",
"s3:Get*",
"s3:List*",
"sdb:BatchPutAttributes",
"sdb:Select",
"sqs:CreateQueue",
"sqs:Delete*",
"sqs:GetQueue*",
"sqs:PurgeQueue",
"sqs:ReceiveMessage"
]
}]
}
EOT
}
# IAM Role for EC2 Instance Profile
resource "aws_iam_role" "iam_emr_profile_role" {
name = "iam_emr_profile_role_%d"
assume_role_policy = <<EOT
{
"Version": "2008-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
EOT
}
resource "aws_iam_instance_profile" "emr_profile" {
name = "emr_profile_%d"
roles = ["${aws_iam_role.iam_emr_profile_role.name}"]
}
resource "aws_iam_role_policy_attachment" "profile-attach" {
role = "${aws_iam_role.iam_emr_profile_role.id}"
policy_arn = "${aws_iam_policy.iam_emr_profile_policy.arn}"
}
resource "aws_iam_policy" "iam_emr_profile_policy" {
name = "iam_emr_profile_policy_%d"
policy = <<EOT
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Resource": "*",
"Action": [
"cloudwatch:*",
"dynamodb:*",
"ec2:Describe*",
"elasticmapreduce:Describe*",
"elasticmapreduce:ListBootstrapActions",
"elasticmapreduce:ListClusters",
"elasticmapreduce:ListInstanceGroups",
"elasticmapreduce:ListInstances",
"elasticmapreduce:ListSteps",
"kinesis:CreateStream",
"kinesis:DeleteStream",
"kinesis:DescribeStream",
"kinesis:GetRecords",
"kinesis:GetShardIterator",
"kinesis:MergeShards",
"kinesis:PutRecord",
"kinesis:SplitShard",
"rds:Describe*",
"s3:*",
"sdb:*",
"sns:*",
"sqs:*"
]
}]
}
EOT
}
`, r, r, r, r, r, r)
}

View File

@ -0,0 +1,251 @@
package aws
import (
"errors"
"log"
"time"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/emr"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
var emrInstanceGroupNotFound = errors.New("No matching EMR Instance Group")
func resourceAwsEMRInstanceGroup() *schema.Resource {
return &schema.Resource{
Create: resourceAwsEMRInstanceGroupCreate,
Read: resourceAwsEMRInstanceGroupRead,
Update: resourceAwsEMRInstanceGroupUpdate,
Delete: resourceAwsEMRInstanceGroupDelete,
Schema: map[string]*schema.Schema{
"cluster_id": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"instance_type": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"instance_count": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Default: 0,
},
"running_instance_count": &schema.Schema{
Type: schema.TypeInt,
Computed: true,
},
"status": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
},
}
}
func resourceAwsEMRInstanceGroupCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).emrconn
clusterId := d.Get("cluster_id").(string)
instanceType := d.Get("instance_type").(string)
instanceCount := d.Get("instance_count").(int)
groupName := d.Get("name").(string)
params := &emr.AddInstanceGroupsInput{
InstanceGroups: []*emr.InstanceGroupConfig{
{
InstanceRole: aws.String("TASK"),
InstanceCount: aws.Int64(int64(instanceCount)),
InstanceType: aws.String(instanceType),
Name: aws.String(groupName),
},
},
JobFlowId: aws.String(clusterId),
}
log.Printf("[DEBUG] Creating EMR task group params: %s", params)
resp, err := conn.AddInstanceGroups(params)
if err != nil {
return err
}
log.Printf("[DEBUG] Created EMR task group finished: %#v", resp)
if resp == nil || len(resp.InstanceGroupIds) == 0 {
return fmt.Errorf("Error creating instance groups: no instance group returned")
}
d.SetId(*resp.InstanceGroupIds[0])
return nil
}
func resourceAwsEMRInstanceGroupRead(d *schema.ResourceData, meta interface{}) error {
group, err := fetchEMRInstanceGroup(meta, d.Get("cluster_id").(string), d.Id())
if err != nil {
switch err {
case emrInstanceGroupNotFound:
log.Printf("[DEBUG] EMR Instance Group (%s) not found, removing", d.Id())
d.SetId("")
return nil
default:
return err
}
}
// Guard against the chance of fetchEMRInstanceGroup returning nil group but
// not an emrInstanceGroupNotFound error
if group == nil {
log.Printf("[DEBUG] EMR Instance Group (%s) not found, removing", d.Id())
d.SetId("")
return nil
}
d.Set("name", group.Name)
d.Set("instance_count", group.RequestedInstanceCount)
d.Set("running_instance_count", group.RunningInstanceCount)
d.Set("instance_type", group.InstanceType)
if group.Status != nil && group.Status.State != nil {
d.Set("status", group.Status.State)
}
return nil
}
func fetchAllEMRInstanceGroups(meta interface{}, clusterId string) ([]*emr.InstanceGroup, error) {
conn := meta.(*AWSClient).emrconn
req := &emr.ListInstanceGroupsInput{
ClusterId: aws.String(clusterId),
}
var groups []*emr.InstanceGroup
marker := aws.String("intitial")
for marker != nil {
log.Printf("[DEBUG] EMR Cluster Instance Marker: %s", *marker)
respGrps, errGrps := conn.ListInstanceGroups(req)
if errGrps != nil {
return nil, fmt.Errorf("[ERR] Error reading EMR cluster (%s): %s", clusterId, errGrps)
}
if respGrps == nil {
return nil, fmt.Errorf("[ERR] Error reading EMR Instance Group for cluster (%s)", clusterId)
}
if respGrps.InstanceGroups != nil {
for _, g := range respGrps.InstanceGroups {
groups = append(groups, g)
}
} else {
log.Printf("[DEBUG] EMR Instance Group list was empty")
}
marker = respGrps.Marker
}
if len(groups) == 0 {
return nil, fmt.Errorf("[WARN] No instance groups found for EMR Cluster (%s)", clusterId)
}
return groups, nil
}
func fetchEMRInstanceGroup(meta interface{}, clusterId, groupId string) (*emr.InstanceGroup, error) {
groups, err := fetchAllEMRInstanceGroups(meta, clusterId)
if err != nil {
return nil, err
}
var group *emr.InstanceGroup
for _, ig := range groups {
if groupId == *ig.Id {
group = ig
break
}
}
if group != nil {
return group, nil
}
return nil, emrInstanceGroupNotFound
}
func resourceAwsEMRInstanceGroupUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).emrconn
log.Printf("[DEBUG] Modify EMR task group")
instanceCount := d.Get("instance_count").(int)
params := &emr.ModifyInstanceGroupsInput{
InstanceGroups: []*emr.InstanceGroupModifyConfig{
{
InstanceGroupId: aws.String(d.Id()),
InstanceCount: aws.Int64(int64(instanceCount)),
},
},
}
_, err := conn.ModifyInstanceGroups(params)
if err != nil {
return err
}
stateConf := &resource.StateChangeConf{
Pending: []string{"PROVISIONING", "BOOTSTRAPPING", "RESIZING"},
Target: []string{"RUNNING"},
Refresh: instanceGroupStateRefresh(meta, d.Get("cluster_id").(string), d.Id()),
Timeout: 10 * time.Minute,
Delay: 10 * time.Second,
MinTimeout: 3 * time.Second,
}
_, err = stateConf.WaitForState()
if err != nil {
return fmt.Errorf(
"Error waiting for instance (%s) to terminate: %s", d.Id(), err)
}
return resourceAwsEMRInstanceGroupRead(d, meta)
}
func instanceGroupStateRefresh(meta interface{}, clusterID, igID string) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
group, err := fetchEMRInstanceGroup(meta, clusterID, igID)
if err != nil {
return nil, "Not Found", err
}
if group.Status == nil || group.Status.State == nil {
log.Printf("[WARN] ERM Instance Group found, but without state")
return nil, "Undefined", fmt.Errorf("Undefined EMR Cluster Instance Group state")
}
return group, *group.Status.State, nil
}
}
func resourceAwsEMRInstanceGroupDelete(d *schema.ResourceData, meta interface{}) error {
log.Printf("[WARN] AWS EMR Instance Group does not support DELETE; resizing cluster to zero before removing from state")
conn := meta.(*AWSClient).emrconn
params := &emr.ModifyInstanceGroupsInput{
InstanceGroups: []*emr.InstanceGroupModifyConfig{
{
InstanceGroupId: aws.String(d.Id()),
InstanceCount: aws.Int64(0),
},
},
}
_, err := conn.ModifyInstanceGroups(params)
if err != nil {
return err
}
return nil
}

View File

@ -0,0 +1,356 @@
package aws
import (
"fmt"
"log"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/emr"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAWSEMRInstanceGroup_basic(t *testing.T) {
var ig emr.InstanceGroup
rInt := acctest.RandInt()
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSEmrInstanceGroupDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSEmrInstanceGroupConfig(rInt),
Check: testAccCheckAWSEmrInstanceGroupExists("aws_emr_instance_group.task", &ig),
},
},
})
}
func testAccCheckAWSEmrInstanceGroupDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).emrconn
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_emr_cluster" {
continue
}
params := &emr.DescribeClusterInput{
ClusterId: aws.String(rs.Primary.ID),
}
describe, err := conn.DescribeCluster(params)
if err == nil {
if describe.Cluster != nil &&
*describe.Cluster.Status.State == "WAITING" {
return fmt.Errorf("EMR Cluster still exists")
}
}
providerErr, ok := err.(awserr.Error)
if !ok {
return err
}
log.Printf("[ERROR] %v", providerErr)
}
return nil
}
func testAccCheckAWSEmrInstanceGroupExists(n string, v *emr.InstanceGroup) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No task group id set")
}
meta := testAccProvider.Meta()
g, err := fetchEMRInstanceGroup(meta, rs.Primary.Attributes["cluster_id"], rs.Primary.ID)
if err != nil {
return fmt.Errorf("EMR error: %v", err)
}
if g == nil {
return fmt.Errorf("No match found for (%s)", n)
}
*v = *g
return nil
}
}
func testAccAWSEmrInstanceGroupConfig(r int) string {
return fmt.Sprintf(`
provider "aws" {
region = "us-west-2"
}
resource "aws_emr_cluster" "tf-test-cluster" {
name = "tf-test-emr-%d"
release_label = "emr-4.6.0"
applications = ["Spark"]
ec2_attributes {
subnet_id = "${aws_subnet.main.id}"
emr_managed_master_security_group = "${aws_security_group.allow_all.id}"
emr_managed_slave_security_group = "${aws_security_group.allow_all.id}"
instance_profile = "${aws_iam_instance_profile.emr_profile.arn}"
}
master_instance_type = "m3.xlarge"
core_instance_type = "m3.xlarge"
core_instance_count = 2
tags {
role = "rolename"
dns_zone = "env_zone"
env = "env"
name = "name-env"
}
bootstrap_action {
path = "s3://elasticmapreduce/bootstrap-actions/run-if"
name = "runif"
args = ["instance.isMaster=true", "echo running on master node"]
}
configurations = "test-fixtures/emr_configurations.json"
service_role = "${aws_iam_role.iam_emr_default_role.arn}"
depends_on = ["aws_internet_gateway.gw"]
}
resource "aws_emr_instance_group" "task" {
cluster_id = "${aws_emr_cluster.tf-test-cluster.id}"
instance_count = 1
instance_type = "m3.xlarge"
}
resource "aws_security_group" "allow_all" {
name = "allow_all"
description = "Allow all inbound traffic"
vpc_id = "${aws_vpc.main.id}"
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
depends_on = ["aws_subnet.main"]
lifecycle {
ignore_changes = ["ingress", "egress"]
}
}
resource "aws_vpc" "main" {
cidr_block = "168.31.0.0/16"
enable_dns_hostnames = true
}
resource "aws_subnet" "main" {
vpc_id = "${aws_vpc.main.id}"
cidr_block = "168.31.0.0/20"
# map_public_ip_on_launch = true
}
resource "aws_internet_gateway" "gw" {
vpc_id = "${aws_vpc.main.id}"
}
resource "aws_route_table" "r" {
vpc_id = "${aws_vpc.main.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.gw.id}"
}
}
resource "aws_main_route_table_association" "a" {
vpc_id = "${aws_vpc.main.id}"
route_table_id = "${aws_route_table.r.id}"
}
###
# IAM role for EMR Service
resource "aws_iam_role" "iam_emr_default_role" {
name = "iam_emr_default_role_%d"
assume_role_policy = <<EOT
{
"Version": "2008-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "elasticmapreduce.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
EOT
}
resource "aws_iam_role_policy_attachment" "service-attach" {
role = "${aws_iam_role.iam_emr_default_role.id}"
policy_arn = "${aws_iam_policy.iam_emr_default_policy.arn}"
}
resource "aws_iam_policy" "iam_emr_default_policy" {
name = "iam_emr_default_policy_%d"
policy = <<EOT
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Resource": "*",
"Action": [
"ec2:AuthorizeSecurityGroupEgress",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CancelSpotInstanceRequests",
"ec2:CreateNetworkInterface",
"ec2:CreateSecurityGroup",
"ec2:CreateTags",
"ec2:DeleteNetworkInterface",
"ec2:DeleteSecurityGroup",
"ec2:DeleteTags",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeAccountAttributes",
"ec2:DescribeDhcpOptions",
"ec2:DescribeInstanceStatus",
"ec2:DescribeInstances",
"ec2:DescribeKeyPairs",
"ec2:DescribeNetworkAcls",
"ec2:DescribeNetworkInterfaces",
"ec2:DescribePrefixLists",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSpotInstanceRequests",
"ec2:DescribeSpotPriceHistory",
"ec2:DescribeSubnets",
"ec2:DescribeVpcAttribute",
"ec2:DescribeVpcEndpoints",
"ec2:DescribeVpcEndpointServices",
"ec2:DescribeVpcs",
"ec2:DetachNetworkInterface",
"ec2:ModifyImageAttribute",
"ec2:ModifyInstanceAttribute",
"ec2:RequestSpotInstances",
"ec2:RevokeSecurityGroupEgress",
"ec2:RunInstances",
"ec2:TerminateInstances",
"ec2:DeleteVolume",
"ec2:DescribeVolumeStatus",
"ec2:DescribeVolumes",
"ec2:DetachVolume",
"iam:GetRole",
"iam:GetRolePolicy",
"iam:ListInstanceProfiles",
"iam:ListRolePolicies",
"iam:PassRole",
"s3:CreateBucket",
"s3:Get*",
"s3:List*",
"sdb:BatchPutAttributes",
"sdb:Select",
"sqs:CreateQueue",
"sqs:Delete*",
"sqs:GetQueue*",
"sqs:PurgeQueue",
"sqs:ReceiveMessage"
]
}]
}
EOT
}
# IAM Role for EC2 Instance Profile
resource "aws_iam_role" "iam_emr_profile_role" {
name = "iam_emr_profile_role_%d"
assume_role_policy = <<EOT
{
"Version": "2008-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
EOT
}
resource "aws_iam_instance_profile" "emr_profile" {
name = "emr_profile_%d"
roles = ["${aws_iam_role.iam_emr_profile_role.name}"]
}
resource "aws_iam_role_policy_attachment" "profile-attach" {
role = "${aws_iam_role.iam_emr_profile_role.id}"
policy_arn = "${aws_iam_policy.iam_emr_profile_policy.arn}"
}
resource "aws_iam_policy" "iam_emr_profile_policy" {
name = "iam_emr_profile_policy_%d"
policy = <<EOT
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Resource": "*",
"Action": [
"cloudwatch:*",
"dynamodb:*",
"ec2:Describe*",
"elasticmapreduce:Describe*",
"elasticmapreduce:ListBootstrapActions",
"elasticmapreduce:ListClusters",
"elasticmapreduce:ListInstanceGroups",
"elasticmapreduce:ListInstances",
"elasticmapreduce:ListSteps",
"kinesis:CreateStream",
"kinesis:DeleteStream",
"kinesis:DescribeStream",
"kinesis:GetRecords",
"kinesis:GetShardIterator",
"kinesis:MergeShards",
"kinesis:PutRecord",
"kinesis:SplitShard",
"rds:Describe*",
"s3:*",
"sdb:*",
"sns:*",
"sqs:*"
]
}]
}
EOT
}`, r, r, r, r, r, r)
}

View File

@ -2,6 +2,7 @@ package aws
import (
"fmt"
"regexp"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
@ -30,8 +31,9 @@ func resourceAwsIamGroup() *schema.Resource {
Computed: true,
},
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
Type: schema.TypeString,
Required: true,
ValidateFunc: validateAwsIamGroupName,
},
"path": &schema.Schema{
Type: schema.TypeString,
@ -127,3 +129,13 @@ func resourceAwsIamGroupDelete(d *schema.ResourceData, meta interface{}) error {
}
return nil
}
func validateAwsIamGroupName(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if !regexp.MustCompile(`^[0-9A-Za-z=,.@\-_]+$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"only alphanumeric characters, hyphens, underscores, commas, periods, @ symbols and equals signs allowed in %q: %q",
k, value))
}
return
}

View File

@ -11,6 +11,42 @@ import (
"github.com/hashicorp/terraform/terraform"
)
func TestValidateIamGroupName(t *testing.T) {
validNames := []string{
"test-group",
"test_group",
"testgroup123",
"TestGroup",
"Test-Group",
"test.group",
"test.123,group",
"testgroup@hashicorp",
}
for _, v := range validNames {
_, errors := validateAwsIamGroupName(v, "name")
if len(errors) != 0 {
t.Fatalf("%q should be a valid IAM Group name: %q", v, errors)
}
}
invalidNames := []string{
"!",
"/",
" ",
":",
";",
"test name",
"/slash-at-the-beginning",
"slash-at-the-end/",
}
for _, v := range invalidNames {
_, errors := validateAwsIamGroupName(v, "name")
if len(errors) == 0 {
t.Fatalf("%q should be an invalid IAM Group name", v)
}
}
}
func TestAccAWSIAMGroup_basic(t *testing.T) {
var conf iam.GetGroupOutput

View File

@ -3,6 +3,7 @@ package aws
import (
"fmt"
"log"
"regexp"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
@ -39,8 +40,9 @@ func resourceAwsIamUser() *schema.Resource {
Computed: true,
},
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
Type: schema.TypeString,
Required: true,
ValidateFunc: validateAwsIamUserName,
},
"path": &schema.Schema{
Type: schema.TypeString,
@ -52,7 +54,7 @@ func resourceAwsIamUser() *schema.Resource {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Delete user even if it has non-Terraform-managed IAM access keys",
Description: "Delete user even if it has non-Terraform-managed IAM access keys and login profile",
},
},
}
@ -165,7 +167,7 @@ func resourceAwsIamUserDelete(d *schema.ResourceData, meta interface{}) error {
}
}
// All access keys for the user must be removed
// All access keys and login profile for the user must be removed
if d.Get("force_destroy").(bool) {
var accessKeys []string
listAccessKeys := &iam.ListAccessKeysInput{
@ -190,6 +192,16 @@ func resourceAwsIamUserDelete(d *schema.ResourceData, meta interface{}) error {
return fmt.Errorf("Error deleting access key %s: %s", k, err)
}
}
_, err = iamconn.DeleteLoginProfile(&iam.DeleteLoginProfileInput{
UserName: aws.String(d.Id()),
})
if err != nil {
if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" {
return nil
}
return fmt.Errorf("Error deleting Account Login Profile: %s", err)
}
}
request := &iam.DeleteUserInput{
@ -202,3 +214,13 @@ func resourceAwsIamUserDelete(d *schema.ResourceData, meta interface{}) error {
}
return nil
}
func validateAwsIamUserName(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if !regexp.MustCompile(`^[0-9A-Za-z=,.@\-_]+$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"only alphanumeric characters, hyphens, underscores, commas, periods, @ symbols and equals signs allowed in %q: %q",
k, value))
}
return
}

View File

@ -0,0 +1,182 @@
package aws
import (
"encoding/base64"
"errors"
"fmt"
"log"
"math/rand"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/vault/helper/pgpkeys"
)
func resourceAwsIamUserLoginProfile() *schema.Resource {
return &schema.Resource{
Create: resourceAwsIamUserLoginProfileCreate,
Read: schema.Noop,
Update: schema.Noop,
Delete: schema.RemoveFromState,
Schema: map[string]*schema.Schema{
"user": {
Type: schema.TypeString,
Required: true,
},
"pgp_key": {
Type: schema.TypeString,
Required: true,
},
"password_reset_required": {
Type: schema.TypeBool,
Optional: true,
Default: true,
},
"password_length": {
Type: schema.TypeInt,
Optional: true,
Default: 20,
ValidateFunc: validateAwsIamLoginProfilePasswordLength,
},
"key_fingerprint": {
Type: schema.TypeString,
Computed: true,
},
"encrypted_password": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func validateAwsIamLoginProfilePasswordLength(v interface{}, _ string) (_ []string, es []error) {
length := v.(int)
if length < 4 {
es = append(es, errors.New("minimum password_length is 4 characters"))
}
if length > 128 {
es = append(es, errors.New("maximum password_length is 128 characters"))
}
return
}
// generatePassword generates a random password of a given length using
// characters that are likely to satisfy any possible AWS password policy
// (given sufficient length).
func generatePassword(length int) string {
charsets := []string{
"abcdefghijklmnopqrstuvwxyz",
"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"012346789",
"!@#$%^&*()_+-=[]{}|'",
}
// Use all character sets
random := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
components := make(map[int]byte, length)
for i := 0; i < length; i++ {
charset := charsets[i%len(charsets)]
components[i] = charset[random.Intn(len(charset))]
}
// Randomise the ordering so we don't end up with a predictable
// lower case, upper case, numeric, symbol pattern
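// (The shuffle below relies on Go's map iteration order, which the runtime
// intentionally randomises, rather than on an explicit Fisher-Yates pass.)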
result := make([]byte, length)
i := 0
for _, b := range components {
result[i] = b
i = i + 1
}
return string(result)
}
func encryptPassword(password string, pgpKey string) (string, string, error) {
const keybasePrefix = "keybase:"
encryptionKey := pgpKey
if strings.HasPrefix(pgpKey, keybasePrefix) {
publicKeys, err := pgpkeys.FetchKeybasePubkeys([]string{pgpKey})
if err != nil {
return "", "", errwrap.Wrapf(
fmt.Sprintf("Error retrieving Public Key for %s: {{err}}", pgpKey), err)
}
encryptionKey = publicKeys[pgpKey]
}
fingerprints, encrypted, err := pgpkeys.EncryptShares([][]byte{[]byte(password)}, []string{encryptionKey})
if err != nil {
return "", "", errwrap.Wrapf(
fmt.Sprintf("Error encrypting password for %s: {{err}}", pgpKey), err)
}
return fingerprints[0], base64.StdEncoding.EncodeToString(encrypted[0]), nil
}
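// The encrypted_password attribute holds the base64-encoded OpenPGP
// ciphertext produced above. As a rough usage sketch (assuming the value is
// exposed as an output and the matching private key is available locally),
// it can be recovered with something like:
//   terraform output encrypted_password | base64 --decode | keybase pgp decrypt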
func resourceAwsIamUserLoginProfileCreate(d *schema.ResourceData, meta interface{}) error {
iamconn := meta.(*AWSClient).iamconn
username := d.Get("user").(string)
passwordResetRequired := d.Get("password_reset_required").(bool)
passwordLength := d.Get("password_length").(int)
_, err := iamconn.GetLoginProfile(&iam.GetLoginProfileInput{
UserName: aws.String(username),
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "NoSuchEntity" {
// If there is already a login profile, bring it under management (to prevent
// resource creation diffs) - we will never modify it, but obviously cannot
// set the password.
d.SetId(username)
d.Set("key_fingerprint", "")
d.Set("encrypted_password", "")
return nil
}
}
var pgpKey string
if pgpKeyInterface, ok := d.GetOk("pgp_key"); ok {
pgpKey = pgpKeyInterface.(string)
}
initialPassword := generatePassword(passwordLength)
fingerprint, encrypted, err := encryptPassword(initialPassword, pgpKey)
if err != nil {
return err
}
request := &iam.CreateLoginProfileInput{
UserName: aws.String(username),
Password: aws.String(initialPassword),
PasswordResetRequired: aws.Bool(passwordResetRequired),
}
log.Println("[DEBUG] Create IAM User Login Profile request:", request)
createResp, err := iamconn.CreateLoginProfile(request)
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "EntityAlreadyExists" {
// If there is already a login profile, bring it under management (to prevent
// resource creation diffs) - we will never modify it, but obviously cannot
// set the password.
d.SetId(username)
d.Set("key_fingerprint", "")
d.Set("encrypted_password", "")
return nil
}
return errwrap.Wrapf(fmt.Sprintf("Error creating IAM User Login Profile for %q: {{err}}", username), err)
}
d.SetId(*createResp.LoginProfile.UserName)
d.Set("key_fingerprint", fingerprint)
d.Set("encrypted_password", encrypted)
return nil
}

View File

@ -0,0 +1,323 @@
package aws
import (
"errors"
"fmt"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"github.com/hashicorp/vault/helper/pgpkeys"
"regexp"
)
func TestAccAWSUserLoginProfile_basic(t *testing.T) {
var conf iam.GetLoginProfileOutput
username := fmt.Sprintf("test-user-%d", acctest.RandInt())
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSUserLoginProfileDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSUserLoginProfileConfig(username, "/", testPubKey1),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSUserLoginProfileExists("aws_iam_user_login_profile.user", &conf),
testDecryptPasswordAndTest("aws_iam_user_login_profile.user", "aws_iam_access_key.user", testPrivKey1),
),
},
},
})
}
func TestAccAWSUserLoginProfile_keybase(t *testing.T) {
var conf iam.GetLoginProfileOutput
username := fmt.Sprintf("test-user-%d", acctest.RandInt())
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSUserLoginProfileDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSUserLoginProfileConfig(username, "/", "keybase:terraformacctest"),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSUserLoginProfileExists("aws_iam_user_login_profile.user", &conf),
resource.TestCheckResourceAttrSet("aws_iam_user_login_profile.user", "encrypted_password"),
resource.TestCheckResourceAttrSet("aws_iam_user_login_profile.user", "key_fingerprint"),
),
},
},
})
}
func TestAccAWSUserLoginProfile_keybaseDoesntExist(t *testing.T) {
username := fmt.Sprintf("test-user-%d", acctest.RandInt())
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSUserLoginProfileDestroy,
Steps: []resource.TestStep{
{
// We own this account but it doesn't have any key associated with it
Config: testAccAWSUserLoginProfileConfig(username, "/", "keybase:terraform_nope"),
ExpectError: regexp.MustCompile(`Error retrieving Public Key`),
},
},
})
}
func TestAccAWSUserLoginProfile_notAKey(t *testing.T) {
username := fmt.Sprintf("test-user-%d", acctest.RandInt())
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSUserLoginProfileDestroy,
Steps: []resource.TestStep{
{
// This value is not a PGP key or a keybase username at all
Config: testAccAWSUserLoginProfileConfig(username, "/", "lolimnotakey"),
ExpectError: regexp.MustCompile(`Error encrypting password`),
},
},
})
}
func testAccCheckAWSUserLoginProfileDestroy(s *terraform.State) error {
iamconn := testAccProvider.Meta().(*AWSClient).iamconn
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_iam_user_login_profile" {
continue
}
// Try to get user
_, err := iamconn.GetLoginProfile(&iam.GetLoginProfileInput{
UserName: aws.String(rs.Primary.ID),
})
if err == nil {
return fmt.Errorf("still exists.")
}
// Verify the error is what we want
iamErr, ok := err.(awserr.Error)
if !ok {
return err
}
if ec2err.Code() != "NoSuchEntity" {
return err
}
}
return nil
}
func testDecryptPasswordAndTest(nProfile, nAccessKey, key string) resource.TestCheckFunc {
return func(s *terraform.State) error {
profileResource, ok := s.RootModule().Resources[nProfile]
if !ok {
return fmt.Errorf("Not found: %s", nProfile)
}
password, ok := profileResource.Primary.Attributes["encrypted_password"]
if !ok {
return errors.New("No password in state")
}
accessKeyResource, ok := s.RootModule().Resources[nAccessKey]
if !ok {
return fmt.Errorf("Not found: %s", nAccessKey)
}
accessKeyId := accessKeyResource.Primary.ID
secretAccessKey, ok := accessKeyResource.Primary.Attributes["secret"]
if !ok {
return errors.New("No secret access key in state")
}
decryptedPassword, err := pgpkeys.DecryptBytes(password, key)
if err != nil {
return fmt.Errorf("Error decrypting password: %s", err)
}
iamAsCreatedUserSession := session.New(&aws.Config{
Region: aws.String("us-west-2"),
Credentials: credentials.NewStaticCredentials(accessKeyId, secretAccessKey, ""),
})
_, err = iamAsCreatedUserSession.Config.Credentials.Get()
if err != nil {
return fmt.Errorf("Error getting session credentials: %s", err)
}
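// Newly issued IAM access keys can take a moment to propagate, so retry
// while the ChangePassword call is rejected with InvalidClientTokenId.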
return resource.Retry(2*time.Minute, func() *resource.RetryError {
iamAsCreatedUser := iam.New(iamAsCreatedUserSession)
_, err = iamAsCreatedUser.ChangePassword(&iam.ChangePasswordInput{
OldPassword: aws.String(decryptedPassword.String()),
NewPassword: aws.String(generatePassword(20)),
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "InvalidClientTokenId" {
return resource.RetryableError(err)
}
return resource.NonRetryableError(fmt.Errorf("Error changing decrypted password: %s", err))
}
return nil
})
}
}
func testAccCheckAWSUserLoginProfileExists(n string, res *iam.GetLoginProfileOutput) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return errors.New("No UserName is set")
}
iamconn := testAccProvider.Meta().(*AWSClient).iamconn
resp, err := iamconn.GetLoginProfile(&iam.GetLoginProfileInput{
UserName: aws.String(rs.Primary.ID),
})
if err != nil {
return err
}
*res = *resp
return nil
}
}
func testAccAWSUserLoginProfileConfig(r, p, key string) string {
return fmt.Sprintf(`
resource "aws_iam_user" "user" {
name = "%s"
path = "%s"
force_destroy = true
}
data "aws_caller_identity" "current" {}
data "aws_iam_policy_document" "user" {
statement {
effect = "Allow"
actions = ["iam:GetAccountPasswordPolicy"]
resources = ["*"]
}
statement {
effect = "Allow"
actions = ["iam:ChangePassword"]
resources = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:user/&{aws:username}"]
}
}
resource "aws_iam_user_policy" "user" {
name = "AllowChangeOwnPassword"
user = "${aws_iam_user.user.name}"
policy = "${data.aws_iam_policy_document.user.json}"
}
resource "aws_iam_access_key" "user" {
user = "${aws_iam_user.user.name}"
}
resource "aws_iam_user_login_profile" "user" {
user = "${aws_iam_user.user.name}"
pgp_key = "%s"
}
`, r, p, key)
}
const testPubKey1 = `mQENBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzzwiMwBS5cD0da
rGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7H+/mhfFvKmgr0Y5kDCF1j0T/
063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0f
sF5St9jhO7mbZU9EFkv9O3t3EaURfHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg
8hQssKeVGpuskTdz5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAG0EFZhdWx0IFRlc3Qg
S2V5IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOfLr44B
HbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d4hIHsG7kmJRTJfjECi+AuTGeDwBy84TD
cRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3CEe8cMwIPqPT2kajJVdOyrvkyuFOdPFOE
A7bdCH0MqgIdM2SdF8t40k/ATfuD2K1ZmumJ508I3gF39jgTnPzD4C8quswrMQ3bzfvKC3klXRlB
C0yoArn+0QA3cf2B9T4zJ2qnvgotVbeK/b1OJRNj6Poeo+SsWNc/A5mw7lGScnDgL3yfwCm1gQXa
QKfOt5x+7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeURBRW5AQ0EVduM9QEIAL53hJ5bZJ7oEDCn
aY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkfRqnv981fFwGnh2+I1Ktm698UAZS9Jt8y
jak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a9okjh5o/3d4cBt1yZPUJJyLKY43Wvptb
6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTjOleRFQbu9ij386Do6jzK69mJU56TfdcydkxkWF5N
ZLGnED3lq+hQNbe+8UI5tD2oP/3r5tXKgMy1R/XPvR/zbfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu
9p315E87DOleYwxk+FoTqXEAEQEAAYkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZ
AQIABgUCVduM9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVhEGipBmpDGRYu
lEimOPzLUX/ZXZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHkGRHG0/DGum0l4sKTta3OPGHN
C1z1dAcQ1RCr9bTD3PxjLBczdGqhzw71trkQRBRdtPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0
YwKoz3h9+QEcZHvsjSZjgydKvfLYcm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJi
oPn2jVMnXCm4EKc7fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH
/AtY+XsKVYRfNIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7moViAAcI
PXFv3m3WfUlnG/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWkojHqyob3cyLgy6z9Q557O
9uK3lQozbw2gH9zC0RqnePl+rsWIUU/ga16fH6pWc1uJiEBt8UZGypQ/E56/343epmYAe0a87sHx
8iDV+dNtDVKfPRENiLOOc19MmS+phmUyrbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKd
OIu60YPNE4+h7u2CfYyFPu3AlUaGNMBlvy6PEpU=`
const testPrivKey1 = `lQOYBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzzwiMwBS5cD0da
rGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7H+/mhfFvKmgr0Y5kDCF1j0T/
063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0f
sF5St9jhO7mbZU9EFkv9O3t3EaURfHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg
8hQssKeVGpuskTdz5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAEAB/wL+KX0mdeISEpX
oDgt766Key1Kthe8nbEs5dOXIsP7OR7ZPcnE2hy6gftgVFnBGEZnWVN70vmJd6Z5y9d1mI+GecXj
UL0EpI0EmohyYDJsHUnght/5ecRNFA+VeNmGPYNQGCeHJyZOiFunGGENpHU7BbubAht8delz37Mx
JQgvMyR6AKvg8HKBoQeqV1uMWNJE/vKwV/z1dh1sjK/GFxu05Qaq0GTfAjVLuFOyJTS95yq6gblD
jUdbHLp7tBeqIKo9voWCJF5mGOlq3973vVoWETy9b0YYPCE/M7fXmK9dJITHqkROLMW6TgcFeIw4
yL5KOBCHk+QGPSvyQN7R7Fd5BADwuT1HZmvg7Y9GjarKXDjxdNemUiHtba2rUzfH6uNmKNQvwQek
nma5palNUJ4/dz1aPB21FUBXJF5yWwXEdApl+lIDU0J5m4UD26rqEVRq9Kx3GsX+yfcwObkrSzW6
kmnQSB5KI0fIuegMTM+Jxo3pB/mIRwDTMmk+vfzIGyW+7QQA8aFwFLMdKdfLgSGbl5Z6etmOAVQ2
Oe2ebegU9z/ewi/Rdt2s9yQiAdGVM8+q15Saz8a+kyS/l1CjNPzr3VpYx1OdZ3gb7i2xoy9GdMYR
ZpTq3TuST95kx/9DqA97JrP23G47U0vwF/cg8ixCYF8Fz5dG4DEsxgMwKqhGdW58wMMD/iytkfMk
Vk6Z958Rpy7lhlC6L3zpO38767bSeZ8gRRi/NMFVOSGYepKFarnfxcTiNa+EoSVA6hUo1N64nALE
sJBpyOoTfKIpz7WwTF1+WogkiYrfM6lHon1+3qlziAcRW0IohM3g2C1i3GWdON4Cl8/PDO3R0E52
N6iG/ctNNeMiPe60EFZhdWx0IFRlc3QgS2V5IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUI
AgkKCwQWAgMBAh4BAheAAAoJEOfLr44BHbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d
4hIHsG7kmJRTJfjECi+AuTGeDwBy84TDcRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3C
Ee8cMwIPqPT2kajJVdOyrvkyuFOdPFOEA7bdCH0MqgIdM2SdF8t40k/ATfuD2K1ZmumJ508I3gF3
9jgTnPzD4C8quswrMQ3bzfvKC3klXRlBC0yoArn+0QA3cf2B9T4zJ2qnvgotVbeK/b1OJRNj6Poe
o+SsWNc/A5mw7lGScnDgL3yfwCm1gQXaQKfOt5x+7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeUR
BRWdA5gEVduM9QEIAL53hJ5bZJ7oEDCnaY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkf
Rqnv981fFwGnh2+I1Ktm698UAZS9Jt8yjak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a
9okjh5o/3d4cBt1yZPUJJyLKY43Wvptb6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTjOleRFQbu
9ij386Do6jzK69mJU56TfdcydkxkWF5NZLGnED3lq+hQNbe+8UI5tD2oP/3r5tXKgMy1R/XPvR/z
bfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu9p315E87DOleYwxk+FoTqXEAEQEAAQAH+wVyQXaNwnjQ
xfW+M8SJNo0C7e+0d7HsuBTA/d/eP4bj6+X8RaRFVwiMvSAoxsqBNCLJP00qzzKfRQWJseD1H35z
UjM7rNVUEL2k1yppyp61S0qj0TdhVUfJDYZqRYonVgRMvzfDTB1ryKrefKenQYL/jGd9VYMnKmWZ
6GVk4WWXXx61iOt2HNcmSXKetMM1Mg67woPZkA3fJaXZ+zW0zMu4lTSB7yl3+vLGIFYILkCFnREr
drQ+pmIMwozUAt+pBq8dylnkHh6g/FtRfWmLIMDqM1NlyuHRp3dyLDFdTA93osLG0QJblfX54W34
byX7a4HASelGi3nPjjOAsTFDkuEEANV2viaWk1CV4ryDrXGmy4Xo32Md+laGPRcVfbJ0mjZjhQsO
gWC1tjMs1qZMPhcrKIBCjjdAcAIrGV9h3CXc0uGuez4XxLO+TPBKaS0B8rKhnKph1YZuf+HrOhzS
astDnOjNIT+qucCL/qSbdYpj9of3yY61S59WphPOBjoVM3BFBADka6ZCk81gx8jA2E1e9UqQDmdM
FZaVA1E7++kqVSFRDJGnq+5GrBTwCJ+sevi+Rvf8Nx4AXvpCdtMBPX9RogsUFcR0pMrKBrgRo/Vg
EpuodY2Ef1VtqXR24OxtRf1UwvHKydIsU05rzMAy5uGgQvTzRTXxZFLGUY31wjWqmo9VPQP+PnwA
K83EV2kk2bsXwZ9MXg05iXqGQYR4bEc/12v04BtaNaDS53hBDO4JIa3Bnz+5oUoYhb8FgezUKA9I
n6RdKTTP1BLAu8titeozpNF07V++dPiSE2wrIVsaNHL1pUwW0ql50titVwe+EglWiCKPtJBcCPUA
3oepSPchiDjPqrNCYIkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZAQIABgUCVduM
9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVhEGipBmpDGRYulEimOPzLUX/Z
XZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHkGRHG0/DGum0l4sKTta3OPGHNC1z1dAcQ1RCr
9bTD3PxjLBczdGqhzw71trkQRBRdtPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0YwKoz3h9+QEc
ZHvsjSZjgydKvfLYcm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJioPn2jVMnXCm4
EKc7fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH/AtY+XsKVYRf
NIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7moViAAcIPXFv3m3WfUln
G/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWkojHqyob3cyLgy6z9Q557O9uK3lQozbw2g
H9zC0RqnePl+rsWIUU/ga16fH6pWc1uJiEBt8UZGypQ/E56/343epmYAe0a87sHx8iDV+dNtDVKf
PRENiLOOc19MmS+phmUyrbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKdOIu60YPNE4+h
7u2CfYyFPu3AlUaGNMBlvy6PEpU=`

View File

@ -7,30 +7,35 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAWSUserPolicyAttachment_basic(t *testing.T) {
var out iam.ListAttachedUserPoliciesOutput
rName := acctest.RandString(10)
policyName1 := fmt.Sprintf("test-policy-%s", acctest.RandString(10))
policyName2 := fmt.Sprintf("test-policy-%s", acctest.RandString(10))
policyName3 := fmt.Sprintf("test-policy-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSUserPolicyAttachmentDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSUserPolicyAttachConfig,
{
Config: testAccAWSUserPolicyAttachConfig(rName, policyName1),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSUserPolicyAttachmentExists("aws_iam_user_policy_attachment.test-attach", 1, &out),
testAccCheckAWSUserPolicyAttachmentAttributes([]string{"test-policy"}, &out),
testAccCheckAWSUserPolicyAttachmentAttributes([]string{policyName1}, &out),
),
},
resource.TestStep{
Config: testAccAWSUserPolicyAttachConfigUpdate,
{
Config: testAccAWSUserPolicyAttachConfigUpdate(rName, policyName1, policyName2, policyName3),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSUserPolicyAttachmentExists("aws_iam_user_policy_attachment.test-attach", 2, &out),
testAccCheckAWSUserPolicyAttachmentAttributes([]string{"test-policy2", "test-policy3"}, &out),
testAccCheckAWSUserPolicyAttachmentAttributes([]string{policyName2, policyName3}, &out),
),
},
},
@ -88,13 +93,14 @@ func testAccCheckAWSUserPolicyAttachmentAttributes(policies []string, out *iam.L
}
}
const testAccAWSUserPolicyAttachConfig = `
func testAccAWSUserPolicyAttachConfig(rName, policyName string) string {
return fmt.Sprintf(`
resource "aws_iam_user" "user" {
name = "test-user"
name = "test-user-%s"
}
resource "aws_iam_policy" "policy" {
name = "test-policy"
name = "%s"
description = "A test policy"
policy = <<EOF
{
@ -115,16 +121,17 @@ EOF
resource "aws_iam_user_policy_attachment" "test-attach" {
user = "${aws_iam_user.user.name}"
policy_arn = "${aws_iam_policy.policy.arn}"
}`, rName, policyName)
}
`
const testAccAWSUserPolicyAttachConfigUpdate = `
func testAccAWSUserPolicyAttachConfigUpdate(rName, policyName1, policyName2, policyName3 string) string {
return fmt.Sprintf(`
resource "aws_iam_user" "user" {
name = "test-user"
name = "test-user-%s"
}
resource "aws_iam_policy" "policy" {
name = "test-policy"
name = "%s"
description = "A test policy"
policy = <<EOF
{
@ -143,7 +150,7 @@ EOF
}
resource "aws_iam_policy" "policy2" {
name = "test-policy2"
name = "%s"
description = "A test policy"
policy = <<EOF
{
@ -162,7 +169,7 @@ EOF
}
resource "aws_iam_policy" "policy3" {
name = "test-policy3"
name = "%s"
description = "A test policy"
policy = <<EOF
{
@ -188,5 +195,5 @@ resource "aws_iam_user_policy_attachment" "test-attach" {
resource "aws_iam_user_policy_attachment" "test-attach2" {
user = "${aws_iam_user.user.name}"
policy_arn = "${aws_iam_policy.policy3.arn}"
}`, rName, policyName1, policyName2, policyName3)
}
`

View File

@ -7,30 +7,72 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestValidateIamUserName(t *testing.T) {
validNames := []string{
"test-user",
"test_user",
"testuser123",
"TestUser",
"Test-User",
"test.user",
"test.123,user",
"testuser@hashicorp",
}
for _, v := range validNames {
_, errors := validateAwsIamUserName(v, "name")
if len(errors) != 0 {
t.Fatalf("%q should be a valid IAM User name: %q", v, errors)
}
}
invalidNames := []string{
"!",
"/",
" ",
":",
";",
"test name",
"/slash-at-the-beginning",
"slash-at-the-end/",
}
for _, v := range invalidNames {
_, errors := validateAwsIamUserName(v, "name")
if len(errors) == 0 {
t.Fatalf("%q should be an invalid IAM User name", v)
}
}
}
func TestAccAWSUser_basic(t *testing.T) {
var conf iam.GetUserOutput
name1 := fmt.Sprintf("test-user-%d", acctest.RandInt())
name2 := fmt.Sprintf("test-user-%d", acctest.RandInt())
path1 := "/"
path2 := "/path2/"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSUserDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSUserConfig,
Config: testAccAWSUserConfig(name1, path1),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSUserExists("aws_iam_user.user", &conf),
testAccCheckAWSUserAttributes(&conf, "test-user", "/"),
testAccCheckAWSUserAttributes(&conf, name1, "/"),
),
},
resource.TestStep{
Config: testAccAWSUserConfig2,
Config: testAccAWSUserConfig(name2, path2),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSUserExists("aws_iam_user.user", &conf),
testAccCheckAWSUserAttributes(&conf, "test-user2", "/path2/"),
testAccCheckAWSUserAttributes(&conf, name2, "/path2/"),
),
},
},
@ -106,15 +148,10 @@ func testAccCheckAWSUserAttributes(user *iam.GetUserOutput, name string, path st
}
}
const testAccAWSUserConfig = `
func testAccAWSUserConfig(r, p string) string {
return fmt.Sprintf(`
resource "aws_iam_user" "user" {
name = "test-user"
path = "/"
name = "%s"
path = "%s"
}`, r, p)
}
`
const testAccAWSUserConfig2 = `
resource "aws_iam_user" "user" {
name = "test-user2"
path = "/path2/"
}
`

View File

@ -41,6 +41,7 @@ func resourceAwsInstance() *schema.Resource {
"associate_public_ip_address": &schema.Schema{
Type: schema.TypeBool,
ForceNew: true,
Computed: true,
Optional: true,
},
@ -98,8 +99,7 @@ func resourceAwsInstance() *schema.Resource {
StateFunc: func(v interface{}) string {
switch v.(type) {
case string:
hash := sha1.Sum([]byte(v.(string)))
return hex.EncodeToString(hash[:])
return userDataHashSum(v.(string))
default:
return ""
}
@ -499,6 +499,7 @@ func resourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error {
if *ni.Attachment.DeviceIndex == 0 {
d.Set("subnet_id", ni.SubnetId)
d.Set("network_interface_id", ni.NetworkInterfaceId)
d.Set("associate_public_ip_address", ni.Association != nil)
}
}
} else {
@ -583,6 +584,18 @@ func resourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error {
}
d.Set("disable_api_termination", attr.DisableApiTermination.Value)
}
{
attr, err := conn.DescribeInstanceAttribute(&ec2.DescribeInstanceAttributeInput{
Attribute: aws.String(ec2.InstanceAttributeNameUserData),
InstanceId: aws.String(d.Id()),
})
if err != nil {
return err
}
if attr.UserData.Value != nil {
d.Set("user_data", userDataHashSum(*attr.UserData.Value))
}
}
return nil
}
@ -1166,3 +1179,16 @@ func iamInstanceProfileArnToName(ip *ec2.IamInstanceProfile) string {
parts := strings.Split(*ip.Arn, "/")
return parts[len(parts)-1]
}
func userDataHashSum(userData string) string {
// Hash the base64-decoded value whenever the input is valid base64;
// otherwise hash the raw string. This keeps raw and pre-encoded
// user_data equivalent and guards against double-encoding when the
// value is set.
v, base64DecodeError := base64.StdEncoding.DecodeString(userData)
if base64DecodeError != nil {
v = []byte(userData)
}
hash := sha1.Sum(v)
return hex.EncodeToString(hash[:])
}
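A minimal standalone sketch (hypothetical package main, not provider code) of why the helper above always hashes the decoded bytes: raw and pre-encoded user_data hash to the same value, so switching between the two forms does not show a spurious diff.

package main

import (
	"crypto/sha1"
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

// Mirrors the provider helper: hash the base64-decoded bytes when the
// input decodes cleanly, otherwise hash the raw string.
func hashUserData(userData string) string {
	v, err := base64.StdEncoding.DecodeString(userData)
	if err != nil {
		v = []byte(userData)
	}
	sum := sha1.Sum(v)
	return hex.EncodeToString(sum[:])
}

func main() {
	raw := "#!/bin/bash\necho hello\n"
	encoded := base64.StdEncoding.EncodeToString([]byte(raw))

	// Both forms hash to the same value.
	fmt.Println(hashUserData(raw) == hashUserData(encoded)) // true
}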

View File

@ -39,7 +39,7 @@ func TestAccAWSInstance_basic(t *testing.T) {
// we'll import as VPC security groups, which is fine. We verify
// VPC security group import in other tests
IDRefreshName: "aws_instance.foo",
IDRefreshIgnore: []string{"user_data", "security_groups", "vpc_security_group_ids"},
IDRefreshIgnore: []string{"security_groups", "vpc_security_group_ids"},
Providers: testAccProviders,
CheckDestroy: testAccCheckInstanceDestroy,
@ -195,7 +195,7 @@ func TestAccAWSInstance_blockDevices(t *testing.T) {
PreCheck: func() { testAccPreCheck(t) },
IDRefreshName: "aws_instance.foo",
IDRefreshIgnore: []string{
"ephemeral_block_device", "user_data", "security_groups", "vpc_security_groups"},
"ephemeral_block_device", "security_groups", "vpc_security_groups"},
Providers: testAccProviders,
CheckDestroy: testAccCheckInstanceDestroy,
Steps: []resource.TestStep{
@ -346,7 +346,7 @@ func TestAccAWSInstance_vpc(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
IDRefreshName: "aws_instance.foo",
IDRefreshIgnore: []string{"associate_public_ip_address", "user_data"},
IDRefreshIgnore: []string{"associate_public_ip_address"},
Providers: testAccProviders,
CheckDestroy: testAccCheckInstanceDestroy,
Steps: []resource.TestStep{
@ -358,7 +358,7 @@ func TestAccAWSInstance_vpc(t *testing.T) {
resource.TestCheckResourceAttr(
"aws_instance.foo",
"user_data",
"2fad308761514d9d73c3c7fdc877607e06cf950d"),
"562a3e32810edf6ff09994f050f12e799452379d"),
),
},
},

View File

@ -33,6 +33,10 @@ func resourceAwsLambdaAlias() *schema.Resource {
Type: schema.TypeString,
Required: true,
},
"arn": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
},
}
}
@ -84,6 +88,7 @@ func resourceAwsLambdaAliasRead(d *schema.ResourceData, meta interface{}) error
d.Set("description", aliasConfiguration.Description)
d.Set("function_version", aliasConfiguration.FunctionVersion)
d.Set("name", aliasConfiguration.Name)
d.Set("arn", aliasConfiguration.AliasArn)
return nil
}

View File

@ -2,6 +2,7 @@ package aws
import (
"fmt"
"regexp"
"testing"
"github.com/aws/aws-sdk-go/aws"
@ -23,6 +24,7 @@ func TestAccAWSLambdaAlias_basic(t *testing.T) {
Check: resource.ComposeTestCheckFunc(
testAccCheckAwsLambdaAliasExists("aws_lambda_alias.lambda_alias_test", &conf),
testAccCheckAwsLambdaAttributes(&conf),
resource.TestMatchResourceAttr("aws_lambda_alias.lambda_alias_test", "arn", regexp.MustCompile(`^arn:aws:lambda:[a-z]+-[a-z]+-[0-9]+:\d{12}:function:example_lambda_name_create:testalias$`)),
),
},
},

View File

@ -17,6 +17,8 @@ import (
"github.com/hashicorp/terraform/helper/schema"
)
const awsMutexLambdaKey = `aws_lambda_function`
func resourceAwsLambdaFunction() *schema.Resource {
return &schema.Resource{
Create: resourceAwsLambdaFunctionCreate,
@ -154,6 +156,11 @@ func resourceAwsLambdaFunctionCreate(d *schema.ResourceData, meta interface{}) e
var functionCode *lambda.FunctionCode
if v, ok := d.GetOk("filename"); ok {
// Grab an exclusive lock so that we're only reading one function into
// memory at a time.
// See https://github.com/hashicorp/terraform/issues/9364
awsMutexKV.Lock(awsMutexLambdaKey)
defer awsMutexKV.Unlock(awsMutexLambdaKey)
file, err := loadFileContent(v.(string))
if err != nil {
return fmt.Errorf("Unable to load %q: %s", v.(string), err)
@ -361,6 +368,11 @@ func resourceAwsLambdaFunctionUpdate(d *schema.ResourceData, meta interface{}) e
}
if v, ok := d.GetOk("filename"); ok {
// Grab an exclusive lock so that we're only reading one function into
// memory at a time.
// See https://github.com/hashicorp/terraform/issues/9364
awsMutexKV.Lock(awsMutexLambdaKey)
defer awsMutexKV.Unlock(awsMutexLambdaKey)
file, err := loadFileContent(v.(string))
if err != nil {
return fmt.Errorf("Unable to load %q: %s", v.(string), err)

View File

@ -545,6 +545,7 @@ func resourceAwsLaunchConfigurationRead(d *schema.ResourceData, meta interface{}
d.Set("spot_price", lc.SpotPrice)
d.Set("enable_monitoring", lc.InstanceMonitoring.Enabled)
d.Set("security_groups", lc.SecurityGroups)
d.Set("associate_public_ip_address", lc.AssociatePublicIpAddress)
d.Set("vpc_classic_link_id", lc.ClassicLinkVPCId)
d.Set("vpc_classic_link_security_groups", lc.ClassicLinkVPCSecurityGroups)

View File

@ -228,7 +228,25 @@ func TestAccAWSNetworkAcl_Subnets(t *testing.T) {
},
},
})
}
func TestAccAWSNetworkAcl_espProtocol(t *testing.T) {
var networkAcl ec2.NetworkAcl
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
IDRefreshName: "aws_network_acl.testesp",
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSNetworkAclDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSNetworkAclEsp,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSNetworkAclExists("aws_network_acl.testesp", &networkAcl),
),
},
},
})
}
func testAccCheckAWSNetworkAclDestroy(s *terraform.State) error {
@ -638,3 +656,26 @@ resource "aws_network_acl" "bar" {
}
}
`
const testAccAWSNetworkAclEsp = `
resource "aws_vpc" "testespvpc" {
cidr_block = "10.1.0.0/16"
}
resource "aws_network_acl" "testesp" {
vpc_id = "${aws_vpc.testespvpc.id}"
egress {
protocol = "esp"
rule_no = 5
action = "allow"
cidr_block = "10.3.0.0/18"
from_port = 0
to_port = 0
}
tags {
Name = "test_esp"
}
}
`
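For context on the new ESP test case: a network ACL entry ultimately carries a numeric IP protocol, so a name such as "esp" has to resolve to IANA protocol 50. An illustrative lookup sketch (a hypothetical subset, not the provider's actual table):

package main

import "fmt"

// A small subset of IANA protocol numbers; the provider presumably keeps
// a fuller name-to-number mapping ("all" is the AWS wildcard, -1).
var protocolNumbers = map[string]int{
	"all":  -1,
	"icmp": 1,
	"tcp":  6,
	"udp":  17,
	"esp":  50,
	"ah":   51,
}

func main() {
	fmt.Println(protocolNumbers["esp"]) // 50
}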

View File

@ -23,7 +23,7 @@ func TestAccAWSOpsworksCustomLayer(t *testing.T) {
CheckDestroy: testAccCheckAwsOpsworksCustomLayerDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAwsOpsworksCustomLayerConfigCreate(stackName),
Config: testAccAwsOpsworksCustomLayerConfigNoVpcCreate(stackName),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(
"aws_opsworks_custom_layer.tf-acc", "name", stackName,
@ -187,7 +187,7 @@ resource "aws_security_group" "tf-ops-acc-layer2" {
}`, name, name)
}
func testAccAwsOpsworksCustomLayerConfigCreate(name string) string {
func testAccAwsOpsworksCustomLayerConfigNoVpcCreate(name string) string {
return fmt.Sprintf(`
provider "aws" {
region = "us-east-1"
@ -224,6 +224,43 @@ resource "aws_opsworks_custom_layer" "tf-acc" {
`, name, testAccAwsOpsworksStackConfigNoVpcCreate(name), testAccAwsOpsworksCustomLayerSecurityGroups(name))
}
func testAccAwsOpsworksCustomLayerConfigVpcCreate(name string) string {
return fmt.Sprintf(`
provider "aws" {
region = "us-west-2"
}
resource "aws_opsworks_custom_layer" "tf-acc" {
stack_id = "${aws_opsworks_stack.tf-acc.id}"
name = "%s"
short_name = "tf-ops-acc-custom-layer"
auto_assign_public_ips = false
custom_security_group_ids = [
"${aws_security_group.tf-ops-acc-layer1.id}",
"${aws_security_group.tf-ops-acc-layer2.id}",
]
drain_elb_on_shutdown = true
instance_shutdown_timeout = 300
system_packages = [
"git",
"golang",
]
ebs_volume {
type = "gp2"
number_of_disks = 2
mount_point = "/home"
size = 100
raid_level = 0
}
}
%s
%s
`, name, testAccAwsOpsworksStackConfigVpcCreate(name), testAccAwsOpsworksCustomLayerSecurityGroups(name))
}
func testAccAwsOpsworksCustomLayerConfigUpdate(name string) string {
return fmt.Sprintf(`
provider "aws" {

View File

@ -20,6 +20,9 @@ func resourceAwsOpsworksStack() *schema.Resource {
Read: resourceAwsOpsworksStackRead,
Update: resourceAwsOpsworksStackUpdate,
Delete: resourceAwsOpsworksStackDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"agent_version": &schema.Schema{
@ -283,6 +286,9 @@ func resourceAwsOpsworksStackRead(d *schema.ResourceData, meta interface{}) erro
d.Set("default_subnet_id", stack.DefaultSubnetId)
d.Set("hostname_theme", stack.HostnameTheme)
d.Set("use_custom_cookbooks", stack.UseCustomCookbooks)
if stack.CustomJson != nil {
d.Set("custom_json", stack.CustomJson)
}
d.Set("use_opsworks_security_groups", stack.UseOpsworksSecurityGroups)
d.Set("vpc_id", stack.VpcId)
if color, ok := stack.Attributes["Color"]; ok {
@ -332,6 +338,9 @@ func resourceAwsOpsworksStackCreate(d *schema.ResourceData, meta interface{}) er
if defaultAvailabilityZone, ok := d.GetOk("default_availability_zone"); ok {
req.DefaultAvailabilityZone = aws.String(defaultAvailabilityZone.(string))
}
if defaultRootDeviceType, ok := d.GetOk("default_root_device_type"); ok {
req.DefaultRootDeviceType = aws.String(defaultRootDeviceType.(string))
}
log.Printf("[DEBUG] Creating OpsWorks stack: %s", req)

Some files were not shown because too many files have changed in this diff.