commit 731fceaae5
Merge remote-tracking branch 'upstream/master' into gcp_compute_disk_snapshot
 .travis.yml | 13

@@ -1,7 +1,20 @@
+dist: trusty
 sudo: false
 language: go
 go:
 - 1.8

+env:
+- CONSUL_VERSION=0.7.5 TF_CONSUL_TEST=1 GOMAXPROCS=4
+
+# Fetch consul for the backend and provider tests
+before_install:
+- curl -sLo consul.zip https://releases.hashicorp.com/consul/${CONSUL_VERSION}/consul_${CONSUL_VERSION}_linux_amd64.zip
+- unzip consul.zip
+- mkdir ~/bin
+- mv consul ~/bin
+- export PATH="~/bin:$PATH"
+
 install:
 # This script is used by the Travis build to install a cookie for
 # go.googlesource.com so rate limits are higher when using `go get` to fetch
 CHANGELOG.md | 164

@@ -1,10 +1,166 @@
-**TEMPORARY NOTE:** The "master" branch CHANGELOG also includes any changes
-in the branch "0-8-stable". The "master" branch is currently a development
-branch for the next major version of Terraform.
-
-## 0.9.0-beta3 (unreleased)
-
+## 0.9.1 (unreleased)
+
+BACKWARDS INCOMPATIBILITIES / NOTES:
+
+ * provider/pagerduty: the deprecated `name_regex` field has been removed from vendor data source [GH-12396]
+
+FEATURES:
+
+ * **New Provider:** `kubernetes` [GH-12372]
+ * **New Resource:** `kubernetes_namespace` [GH-12372]
+
+BUG FIXES:
+
+ * provider/aws: Get the aws_lambda_function attributes when there are greater than 50 versions of a function [GH-11745]
+ * provider/google: Fix the Google provider asking for account_file input on every run [GH-12729]
+
+
+## 0.9.0 (March 15, 2017)
+
+**This is the complete 0.8.8 to 0.9 CHANGELOG. Below this section we also have a 0.9.0-beta2 to 0.9.0 final CHANGELOG.**
+
+BACKWARDS INCOMPATIBILITIES / NOTES:
+
+ * provider/aws: `aws_codebuild_project` renamed `timeout` to `build_timeout` ([#12503](https://github.com/hashicorp/terraform/issues/12503))
+ * provider/azurerm: `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` now store a hash of custom_data, not all of custom_data ([#12214](https://github.com/hashicorp/terraform/issues/12214))
+ * provider/azurerm: scale_sets `os_profile_master_password` now marked as sensitive
+ * provider/azurerm: sql_server `administrator_login_password` now marked as sensitive
+ * provider/dnsimple: Provider has been upgraded to APIv2; therefore, you will need to use the APIv2 auth token
+ * provider/google: storage buckets have been updated with the new storage classes. The old classes will continue working as before, but should be migrated as soon as possible, as there's no guarantee they'll continue working forever. ([#12044](https://github.com/hashicorp/terraform/issues/12044))
+ * provider/google: compute_instance, compute_instance_template, and compute_disk all have subtly changed logic when specifying an image family as the image; in 0.8.x they would pin to the latest image in the family when the resource is created; in 0.9.x they pass the family to the API and use its behaviour. New input formats are also supported. ([#12223](https://github.com/hashicorp/terraform/issues/12223))
+ * provider/google: removed the unused and deprecated region field from google_compute_backend_service ([#12663](https://github.com/hashicorp/terraform/issues/12663))
+ * provider/google: removed the deprecated account_file field for the Google Cloud provider ([#12668](https://github.com/hashicorp/terraform/issues/12668))
+ * provider/google: removed the deprecated fields from google_project ([#12659](https://github.com/hashicorp/terraform/issues/12659))
+
+FEATURES:
+
+ * **Remote Backends:** This is a successor to "remote state" and includes
+   file-based configuration, an improved setup process (just run `terraform init`),
+   no more local caching of remote state, and more. ([#11286](https://github.com/hashicorp/terraform/issues/11286))
+ * **Destroy Provisioners:** Provisioners can now be configured to run
+   on resource destruction. ([#11329](https://github.com/hashicorp/terraform/issues/11329))
+ * **State Locking:** State will be automatically locked when supported by the backend.
+   Backends supporting locking in this release are Local, S3 (via DynamoDB), and Consul. ([#11187](https://github.com/hashicorp/terraform/issues/11187))
+ * **State Environments:** You can now create named "environments" for states. This allows you to manage distinct infrastructure resources from the same configuration.
+ * **New Provider:** `Circonus` ([#12578](https://github.com/hashicorp/terraform/issues/12578))
+ * **New Data Source:** `openstack_networking_network_v2` ([#12304](https://github.com/hashicorp/terraform/issues/12304))
+ * **New Resource:** `aws_iam_account_alias` ([#12648](https://github.com/hashicorp/terraform/issues/12648))
+ * **New Resource:** `datadog_downtime` ([#10994](https://github.com/hashicorp/terraform/issues/10994))
+ * **New Resource:** `ns1_notifylist` ([#12373](https://github.com/hashicorp/terraform/issues/12373))
+ * **New Resource:** `google_container_node_pool` ([#11802](https://github.com/hashicorp/terraform/issues/11802))
+ * **New Resource:** `rancher_certificate` ([#12717](https://github.com/hashicorp/terraform/issues/12717))
+ * **New Resource:** `rancher_host` ([#11545](https://github.com/hashicorp/terraform/issues/11545))
+ * helper/schema: Added Timeouts to allow Provider/Resource developers to expose configurable timeouts for actions ([#12311](https://github.com/hashicorp/terraform/issues/12311))
+
+IMPROVEMENTS:
+
+ * core: Data source values can now be used as part of a `count` calculation. ([#11482](https://github.com/hashicorp/terraform/issues/11482))
+ * core: "terraformrc" can contain env var references with $FOO ([#11929](https://github.com/hashicorp/terraform/issues/11929))
+ * core: report all errors encountered during config validation ([#12383](https://github.com/hashicorp/terraform/issues/12383))
+ * command: CLI args can be specified via env vars. Specify `TF_CLI_ARGS` or `TF_CLI_ARGS_name` (where name is the name of a command) to specify additional CLI args ([#11922](https://github.com/hashicorp/terraform/issues/11922))
+ * command/init: previous behavior is retained, but init now also configures
+   the new remote backends as well as downloads modules. It is the single
+   command to initialize a new or existing Terraform configuration.
+ * command: Display resource state ID in refresh/plan/destroy output ([#12261](https://github.com/hashicorp/terraform/issues/12261))
+ * provider/aws: AWS Lambda DeadLetterConfig support ([#12188](https://github.com/hashicorp/terraform/issues/12188))
+ * provider/aws: Return errors from Elastic Beanstalk ([#12425](https://github.com/hashicorp/terraform/issues/12425))
+ * provider/aws: Set aws_db_cluster to snapshot by default ([#11668](https://github.com/hashicorp/terraform/issues/11668))
+ * provider/aws: Enable final snapshots for aws_rds_cluster by default ([#11694](https://github.com/hashicorp/terraform/issues/11694))
+ * provider/aws: Enable snapshotting by default on aws_redshift_cluster ([#11695](https://github.com/hashicorp/terraform/issues/11695))
+ * provider/aws: Add support for ACM certificates to `api_gateway_domain_name` ([#12592](https://github.com/hashicorp/terraform/issues/12592))
+ * provider/aws: Add support for IPv6 to aws\_security\_group\_rule ([#12645](https://github.com/hashicorp/terraform/issues/12645))
+ * provider/aws: Add IPv6 Support to aws\_route\_table ([#12640](https://github.com/hashicorp/terraform/issues/12640))
+ * provider/aws: Add support for IPv6 to aws\_network\_acl\_rule ([#12644](https://github.com/hashicorp/terraform/issues/12644))
+ * provider/aws: Add support for IPv6 to aws\_default\_route\_table ([#12642](https://github.com/hashicorp/terraform/issues/12642))
+ * provider/aws: Add support for IPv6 to aws\_network\_acl ([#12641](https://github.com/hashicorp/terraform/issues/12641))
+ * provider/aws: Add support for IPv6 in aws\_route ([#12639](https://github.com/hashicorp/terraform/issues/12639))
+ * provider/aws: Add support for IPv6 to aws\_security\_group ([#12655](https://github.com/hashicorp/terraform/issues/12655))
+ * provider/aws: Add replace\_unhealthy\_instances to spot\_fleet\_request ([#12681](https://github.com/hashicorp/terraform/issues/12681))
+ * provider/aws: Remove restriction on running aws\_opsworks\_* on us-east-1 ([#12688](https://github.com/hashicorp/terraform/issues/12688))
+ * provider/aws: Improve error message on S3 Bucket Object deletion ([#12712](https://github.com/hashicorp/terraform/issues/12712))
+ * provider/aws: Add log message about whether changes are being applied now or later ([#12691](https://github.com/hashicorp/terraform/issues/12691))
+ * provider/azurerm: Mark the azurerm_scale_set machine password as sensitive ([#11982](https://github.com/hashicorp/terraform/issues/11982))
+ * provider/azurerm: Mark the azurerm_sql_server admin password as sensitive ([#12004](https://github.com/hashicorp/terraform/issues/12004))
+ * provider/azurerm: Add support for managed availability sets. ([#12532](https://github.com/hashicorp/terraform/issues/12532))
+ * provider/azurerm: Add support for extensions on virtual machine scale sets ([#12124](https://github.com/hashicorp/terraform/issues/12124))
+ * provider/dnsimple: Upgrade DNSimple provider to API v2 ([#10760](https://github.com/hashicorp/terraform/issues/10760))
+ * provider/docker: added support for linux capabilities ([#12045](https://github.com/hashicorp/terraform/issues/12045))
+ * provider/fastly: Add Fastly SSL validation fields ([#12578](https://github.com/hashicorp/terraform/issues/12578))
+ * provider/ignition: Migrate all of the ignition resources to data sources ([#11851](https://github.com/hashicorp/terraform/issues/11851))
+ * provider/openstack: Set Availability Zone in Instances ([#12610](https://github.com/hashicorp/terraform/issues/12610))
+ * provider/openstack: Force Deletion of Instances ([#12689](https://github.com/hashicorp/terraform/issues/12689))
+ * provider/rancher: Better comparison of compose files ([#12561](https://github.com/hashicorp/terraform/issues/12561))
+ * provider/azurerm: store only hash of `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` custom_data - reduces size of state ([#12214](https://github.com/hashicorp/terraform/issues/12214))
+ * provider/vault: read vault token from `~/.vault-token` as a fallback for the
+   `VAULT_TOKEN` environment variable. ([#11529](https://github.com/hashicorp/terraform/issues/11529))
+ * provisioners: All provisioners now respond very quickly to interrupts for
+   fast cancellation. ([#10934](https://github.com/hashicorp/terraform/issues/10934))
+
+BUG FIXES:
+
+ * core: targeting will remove untargeted providers ([#12050](https://github.com/hashicorp/terraform/issues/12050))
+ * core: doing a map lookup in a resource config with a computed set no longer crashes ([#12210](https://github.com/hashicorp/terraform/issues/12210))
+ * provider/aws: Fixes issue for aws_lb_ssl_negotiation_policy of already deleted ELB ([#12360](https://github.com/hashicorp/terraform/issues/12360))
+ * provider/aws: Populate the iam_instance_profile uniqueId ([#12449](https://github.com/hashicorp/terraform/issues/12449))
+ * provider/aws: Only send iops when creating io1 devices ([#12392](https://github.com/hashicorp/terraform/issues/12392))
+ * provider/aws: Fix spurious aws_spot_fleet_request diffs ([#12437](https://github.com/hashicorp/terraform/issues/12437))
+ * provider/aws: Changing volumes in ECS task definition should force new revision ([#11403](https://github.com/hashicorp/terraform/issues/11403))
+ * provider/aws: Ignore whitespace in json diff for aws_dms_replication_task options ([#12380](https://github.com/hashicorp/terraform/issues/12380))
+ * provider/aws: Check spot instance is running before trying to attach volumes ([#12459](https://github.com/hashicorp/terraform/issues/12459))
+ * provider/aws: Add the IPV6 cidr block to the vpc datasource ([#12529](https://github.com/hashicorp/terraform/issues/12529))
+ * provider/aws: Error on trying to recreate an existing customer gateway ([#12501](https://github.com/hashicorp/terraform/issues/12501))
+ * provider/aws: Prevent aws_dms_replication_task panic ([#12539](https://github.com/hashicorp/terraform/issues/12539))
+ * provider/aws: output the task definition name when errors occur during refresh ([#12609](https://github.com/hashicorp/terraform/issues/12609))
+ * provider/aws: Refresh iam saml provider from state on 404 ([#12602](https://github.com/hashicorp/terraform/issues/12602))
+ * provider/aws: Add address, port, hosted_zone_id and endpoint for aws_db_instance datasource ([#12623](https://github.com/hashicorp/terraform/issues/12623))
+ * provider/aws: Allow recreation of `aws_opsworks_user_profile` when the `user_arn` is changed ([#12595](https://github.com/hashicorp/terraform/issues/12595))
+ * provider/aws: Guard clause to prevent panic on ELB connectionSettings ([#12685](https://github.com/hashicorp/terraform/issues/12685))
+ * provider/azurerm: bug fix to prevent crashes during azurerm_container_service provisioning ([#12516](https://github.com/hashicorp/terraform/issues/12516))
+ * provider/cobbler: Fix Profile Repos ([#12452](https://github.com/hashicorp/terraform/issues/12452))
+ * provider/datadog: Update to datadog_monitor to use default values ([#12497](https://github.com/hashicorp/terraform/issues/12497))
+ * provider/datadog: Default notify_no_data on datadog_monitor to false ([#11903](https://github.com/hashicorp/terraform/issues/11903))
+ * provider/google: Correct the incorrect instance group manager URL returned from GKE ([#4336](https://github.com/hashicorp/terraform/issues/4336))
+ * provider/google: Fix a plan/apply cycle in IAM policies ([#12387](https://github.com/hashicorp/terraform/issues/12387))
+ * provider/google: Fix a plan/apply cycle in forwarding rules when only a single port is specified ([#12662](https://github.com/hashicorp/terraform/issues/12662))
+ * provider/google: Minor correction: "Deleting disk" message in Delete method ([#12521](https://github.com/hashicorp/terraform/issues/12521))
+ * provider/mysql: Avoid crash on un-interpolated provider cfg ([#12391](https://github.com/hashicorp/terraform/issues/12391))
+ * provider/ns1: Fix incorrect schema (causing crash) for 'ns1_user.notify' ([#12721](https://github.com/hashicorp/terraform/issues/12721))
+ * provider/openstack: Handle cases where volumes are disabled ([#12374](https://github.com/hashicorp/terraform/issues/12374))
+ * provider/openstack: Toggle Creation of Default Security Group Rules ([#12119](https://github.com/hashicorp/terraform/issues/12119))
+ * provider/openstack: Change Port fixed_ip to a Set ([#12613](https://github.com/hashicorp/terraform/issues/12613))
+ * provider/openstack: Add network_id to Network data source ([#12615](https://github.com/hashicorp/terraform/issues/12615))
+ * provider/openstack: Check for ErrDefault500 when creating/deleting pool member ([#12664](https://github.com/hashicorp/terraform/issues/12664))
+ * provider/rancher: Apply the set value for finish_upgrade to prevent recurring plans ([#12545](https://github.com/hashicorp/terraform/issues/12545))
+ * provider/scaleway: work around API concurrency issue ([#12707](https://github.com/hashicorp/terraform/issues/12707))
+ * provider/statuscake: use default status code list when updating test ([#12375](https://github.com/hashicorp/terraform/issues/12375))
+
+## 0.9.0 from 0.9.0-beta2 (March 15, 2017)
+
+**This only includes changes from 0.9.0-beta2 to 0.9.0 final. The section above has the complete 0.8.x to 0.9.0 CHANGELOG.**
+
+FEATURES:
+
+ * **New Provider:** `Circonus` ([#12578](https://github.com/hashicorp/terraform/issues/12578))
+
+BACKWARDS INCOMPATIBILITIES / NOTES:
+
+ * provider/aws: `aws_codebuild_project` renamed `timeout` to `build_timeout` ([#12503](https://github.com/hashicorp/terraform/issues/12503))
+ * provider/azurerm: `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` now store a hash of custom_data, not all of custom_data ([#12214](https://github.com/hashicorp/terraform/issues/12214))
+ * provider/google: compute_instance, compute_instance_template, and compute_disk all have subtly changed logic when specifying an image family as the image; in 0.8.x they would pin to the latest image in the family when the resource is created; in 0.9.x they pass the family to the API and use its behaviour. New input formats are also supported. ([#12223](https://github.com/hashicorp/terraform/issues/12223))
+ * provider/google: removed the unused and deprecated region field from google_compute_backend_service ([#12663](https://github.com/hashicorp/terraform/issues/12663))
+ * provider/google: removed the deprecated account_file field for the Google Cloud provider ([#12668](https://github.com/hashicorp/terraform/issues/12668))
+ * provider/google: removed the deprecated fields from google_project ([#12659](https://github.com/hashicorp/terraform/issues/12659))
+
+IMPROVEMENTS:
+
+ * provider/azurerm: store only hash of `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` custom_data - reduces size of state ([#12214](https://github.com/hashicorp/terraform/issues/12214))
+ * report all errors encountered during config validation ([#12383](https://github.com/hashicorp/terraform/issues/12383))
+
+BUG FIXES:
+
+ * provider/google: Correct the incorrect instance group manager URL returned from GKE ([#4336](https://github.com/hashicorp/terraform/issues/4336))
+ * provider/google: Fix a plan/apply cycle in IAM policies ([#12387](https://github.com/hashicorp/terraform/issues/12387))
+ * provider/google: Fix a plan/apply cycle in forwarding rules when only a single port is specified ([#12662](https://github.com/hashicorp/terraform/issues/12662))
+
 ## 0.9.0-beta2 (March 2, 2017)

 Makefile | 2

@@ -38,7 +38,7 @@ plugin-dev: generate
 	mv $(GOPATH)/bin/$(PLUGIN) $(GOPATH)/bin/terraform-$(PLUGIN)

 # test runs the unit tests
-test:# fmtcheck errcheck generate
+test: fmtcheck errcheck generate
 	go test -i $(TEST) || exit 1
 	echo $(TEST) | \
 		xargs -t -n4 go test $(TESTARGS) -timeout=30s -parallel=4

@@ -1,7 +1,7 @@
 Terraform
 =========

-- Website: http://www.terraform.io
+- Website: https://www.terraform.io
 - [![Gitter chat](https://badges.gitter.im/hashicorp-terraform/Lobby.png)](https://gitter.im/hashicorp-terraform/Lobby)
 - Mailing list: [Google Groups](http://groups.google.com/group/terraform-tool)

@@ -29,7 +29,7 @@ All documentation is available on the [Terraform website](http://www.terraform.i
 Developing Terraform
 --------------------

-If you wish to work on Terraform itself or any of its built-in providers, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.7+ is *required*). Alternatively, you can use the Vagrantfile in the root of this repo to stand up a virtual machine with the appropriate dev tooling already set up for you.
+If you wish to work on Terraform itself or any of its built-in providers, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.8+ is *required*). Alternatively, you can use the Vagrantfile in the root of this repo to stand up a virtual machine with the appropriate dev tooling already set up for you.

 For local dev first make sure Go is properly installed, including setting up a [GOPATH](http://golang.org/doc/code.html#GOPATH). You will also need to add `$GOPATH/bin` to your `$PATH`.

@@ -127,7 +127,7 @@ func (b *Local) States() ([]string, error) {
     // the listing always start with "default"
     envs := []string{backend.DefaultStateName}

-    entries, err := ioutil.ReadDir(DefaultEnvDir)
+    entries, err := ioutil.ReadDir(b.stateEnvDir())
     // no error if there's no envs configured
     if os.IsNotExist(err) {
         return envs, nil

@@ -166,7 +166,7 @@ func (b *Local) DeleteState(name string) error {
     }

     delete(b.states, name)
-    return os.RemoveAll(filepath.Join(DefaultEnvDir, name))
+    return os.RemoveAll(filepath.Join(b.stateEnvDir(), name))
 }

 func (b *Local) State(name string) (state.State, error) {

@@ -320,17 +320,12 @@ func (b *Local) StatePaths(name string) (string, string, string) {
         name = backend.DefaultStateName
     }

-    envDir := DefaultEnvDir
-    if b.StateEnvDir != "" {
-        envDir = b.StateEnvDir
-    }
-
     if name == backend.DefaultStateName {
         if statePath == "" {
             statePath = DefaultStateFilename
         }
     } else {
-        statePath = filepath.Join(envDir, name, DefaultStateFilename)
+        statePath = filepath.Join(b.stateEnvDir(), name, DefaultStateFilename)
     }

     if stateOutPath == "" {

@@ -353,12 +348,7 @@ func (b *Local) createState(name string) error {
         return nil
     }

-    envDir := DefaultEnvDir
-    if b.StateEnvDir != "" {
-        envDir = b.StateEnvDir
-    }
-
-    stateDir := filepath.Join(envDir, name)
+    stateDir := filepath.Join(b.stateEnvDir(), name)
     s, err := os.Stat(stateDir)
     if err == nil && s.IsDir() {
         // no need to check for os.IsNotExist, since that is covered by os.MkdirAll

@@ -374,6 +364,15 @@ func (b *Local) createState(name string) error {
     return nil
 }

+// stateEnvDir returns the directory where state environments are stored.
+func (b *Local) stateEnvDir() string {
+    if b.StateEnvDir != "" {
+        return b.StateEnvDir
+    }
+
+    return DefaultEnvDir
+}
+
 // currentStateName returns the name of the current named state as set in the
 // configuration files.
 // If there are no configured environments, currentStateName returns "default"
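Aside (not part of the diff): a minimal, hypothetical Go sketch of the path resolution that this refactor centralizes in stateEnvDir(). The literals "terraform.tfstate.d" and "terraform.tfstate" are assumed stand-ins for DefaultEnvDir and DefaultStateFilename, which are defined elsewhere in the package.

    package main

    import (
        "fmt"
        "path/filepath"
    )

    // statePathFor mirrors the logic of (*Local).stateEnvDir and StatePaths above:
    // an explicit StateEnvDir overrides the default environments directory, and a
    // named (non-default) state lives under <envDir>/<name>/<state file>.
    func statePathFor(stateEnvDir, name string) string {
        envDir := "terraform.tfstate.d" // assumed stand-in for DefaultEnvDir
        if stateEnvDir != "" {
            envDir = stateEnvDir
        }

        if name == "default" {
            return "terraform.tfstate" // assumed stand-in for DefaultStateFilename
        }
        return filepath.Join(envDir, name, "terraform.tfstate")
    }

    func main() {
        fmt.Println(statePathFor("", "default")) // terraform.tfstate
        fmt.Println(statePathFor("", "staging")) // terraform.tfstate.d/staging/terraform.tfstate
    }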
@@ -20,6 +20,11 @@ func TestLocal_impl(t *testing.T) {
     var _ backend.CLI = new(Local)
 }

+func TestLocal_backend(t *testing.T) {
+    b := TestLocal(t)
+    backend.TestBackend(t, b, b)
+}
+
 func checkState(t *testing.T, path, expected string) {
     // Read the state
     f, err := os.Open(path)
@@ -21,6 +21,7 @@ func TestLocal(t *testing.T) *Local {
     StatePath: filepath.Join(tempDir, "state.tfstate"),
     StateOutPath: filepath.Join(tempDir, "state.tfstate"),
     StateBackupPath: filepath.Join(tempDir, "state.tfstate.bak"),
+    StateEnvDir: filepath.Join(tempDir, "state.tfstate.d"),
     ContextOpts: &terraform.ContextOpts{},
     }
 }
@@ -53,6 +53,20 @@ func New() backend.Backend {
         Description: "HTTP Auth in the format of 'username:password'",
         Default: "", // To prevent input
     },
+
+    "gzip": &schema.Schema{
+        Type: schema.TypeBool,
+        Optional: true,
+        Description: "Compress the state data using gzip",
+        Default: false,
+    },
+
+    "lock": &schema.Schema{
+        Type: schema.TypeBool,
+        Optional: true,
+        Description: "Lock state access",
+        Default: true,
+    },
     },
 }

@@ -64,13 +78,18 @@ func New() backend.Backend {
 type Backend struct {
     *schema.Backend

+    // The fields below are set from configure
     configData *schema.ResourceData
+    lock bool
 }

 func (b *Backend) configure(ctx context.Context) error {
     // Grab the resource data
     b.configData = schema.FromContextBackendConfig(ctx)

+    // Store the lock information
+    b.lock = b.configData.Get("lock").(bool)
+
     // Initialize a client to test config
     _, err := b.clientRaw()
     return err
@@ -85,27 +85,39 @@ func (b *Backend) State(name string) (state.State, error) {
     // Determine the path of the data
     path := b.path(name)

+    // Determine whether to gzip or not
+    gzip := b.configData.Get("gzip").(bool)
+
     // Build the state client
-    stateMgr := &remote.State{
+    var stateMgr state.State = &remote.State{
         Client: &RemoteClient{
             Client: client,
             Path: path,
+            GZip: gzip,
         },
     }

+    // If we're not locking, disable it
+    if !b.lock {
+        stateMgr = &state.LockDisabled{Inner: stateMgr}
+    }
+
+    // Get the locker, which we know always exists
+    stateMgrLocker := stateMgr.(state.Locker)
+
     // Grab a lock, we use this to write an empty state if one doesn't
     // exist already. We have to write an empty state as a sentinel value
     // so States() knows it exists.
     lockInfo := state.NewLockInfo()
     lockInfo.Operation = "init"
-    lockId, err := stateMgr.Lock(lockInfo)
+    lockId, err := stateMgrLocker.Lock(lockInfo)
     if err != nil {
         return nil, fmt.Errorf("failed to lock state in Consul: %s", err)
     }

     // Local helper function so we can call it multiple places
     lockUnlock := func(parent error) error {
-        if err := stateMgr.Unlock(lockId); err != nil {
+        if err := stateMgrLocker.Unlock(lockId); err != nil {
             return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err)
         }
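Aside (not part of the diff): a short sketch of the optional-locking pattern State() uses above. When the backend's lock option is off, the state manager is wrapped in state.LockDisabled, and code that needs locking type-asserts to state.Locker. The helper name is hypothetical.

    package example

    import "github.com/hashicorp/terraform/state"

    // maybeDisableLocking illustrates the pattern from the consul backend above.
    // It assumes s already implements state.Locker (as the remote state manager
    // used here does); LockDisabled keeps the interface but makes Lock/Unlock no-ops.
    func maybeDisableLocking(s state.State, lock bool) (state.State, state.Locker) {
        if !lock {
            s = &state.LockDisabled{Inner: s}
        }
        return s, s.(state.Locker)
    }

State() then acquires and releases the init lock through the returned state.Locker, exactly as in the hunk above.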
@@ -2,10 +2,12 @@ package consul

 import (
     "fmt"
+    "io/ioutil"
     "os"
     "testing"
     "time"

+    "github.com/hashicorp/consul/testutil"
     "github.com/hashicorp/terraform/backend"
 )

@@ -13,19 +15,80 @@ func TestBackend_impl(t *testing.T) {
     var _ backend.Backend = new(Backend)
 }

-func TestBackend(t *testing.T) {
-    addr := os.Getenv("CONSUL_HTTP_ADDR")
-    if addr == "" {
-        t.Log("consul tests require CONSUL_HTTP_ADDR")
-        t.Skip()
-    }
-
-    // Get the backend
-    b := backend.TestBackendConfig(t, New(), map[string]interface{}{
-        "address": addr,
-        "path": fmt.Sprintf("tf-unit/%s", time.Now().String()),
-    })
-
-    // Test
-    backend.TestBackend(t, b)
-}
+func newConsulTestServer(t *testing.T) *testutil.TestServer {
+    skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_CONSUL_TEST") == ""
+    if skip {
+        t.Log("consul server tests require setting TF_ACC or TF_CONSUL_TEST")
+        t.Skip()
+    }
+
+    srv := testutil.NewTestServerConfig(t, func(c *testutil.TestServerConfig) {
+        c.LogLevel = "warn"
+
+        if !testing.Verbose() {
+            c.Stdout = ioutil.Discard
+            c.Stderr = ioutil.Discard
+        }
+    })
+
+    return srv
+}
+
+func TestBackend(t *testing.T) {
+    srv := newConsulTestServer(t)
+    defer srv.Stop()
+
+    path := fmt.Sprintf("tf-unit/%s", time.Now().String())
+
+    // Get the backend. We need two to test locking.
+    b1 := backend.TestBackendConfig(t, New(), map[string]interface{}{
+        "address": srv.HTTPAddr,
+        "path": path,
+    })
+
+    b2 := backend.TestBackendConfig(t, New(), map[string]interface{}{
+        "address": srv.HTTPAddr,
+        "path": path,
+    })
+
+    // Test
+    backend.TestBackend(t, b1, b2)
+}
+
+func TestBackend_lockDisabled(t *testing.T) {
+    srv := newConsulTestServer(t)
+    defer srv.Stop()
+
+    path := fmt.Sprintf("tf-unit/%s", time.Now().String())
+
+    // Get the backend. We need two to test locking.
+    b1 := backend.TestBackendConfig(t, New(), map[string]interface{}{
+        "address": srv.HTTPAddr,
+        "path": path,
+        "lock": false,
+    })
+
+    b2 := backend.TestBackendConfig(t, New(), map[string]interface{}{
+        "address": srv.HTTPAddr,
+        "path": path + "different", // Diff so locking test would fail if it was locking
+        "lock": false,
+    })
+
+    // Test
+    backend.TestBackend(t, b1, b2)
+}
+
+func TestBackend_gzip(t *testing.T) {
+    srv := newConsulTestServer(t)
+    defer srv.Stop()
+
+    // Get the backend
+    b := backend.TestBackendConfig(t, New(), map[string]interface{}{
+        "address": srv.HTTPAddr,
+        "path": fmt.Sprintf("tf-unit/%s", time.Now().String()),
+        "gzip": true,
+    })
+
+    // Test
+    backend.TestBackend(t, b, nil)
+}
@@ -1,6 +1,8 @@
 package consul

 import (
+    "bytes"
+    "compress/gzip"
     "crypto/md5"
     "encoding/json"
     "errors"

@@ -22,6 +24,7 @@ const (
 type RemoteClient struct {
     Client *consulapi.Client
     Path string
+    GZip bool

     consulLock *consulapi.Lock
     lockCh <-chan struct{}

@@ -36,18 +39,37 @@ func (c *RemoteClient) Get() (*remote.Payload, error) {
         return nil, nil
     }

+    payload := pair.Value
+    // If the payload starts with 0x1f, it's gzip, not json
+    if len(pair.Value) >= 1 && pair.Value[0] == '\x1f' {
+        if data, err := uncompressState(pair.Value); err == nil {
+            payload = data
+        } else {
+            return nil, err
+        }
+    }
+
     md5 := md5.Sum(pair.Value)
     return &remote.Payload{
-        Data: pair.Value,
+        Data: payload,
         MD5: md5[:],
     }, nil
 }

 func (c *RemoteClient) Put(data []byte) error {
+    payload := data
+    if c.GZip {
+        if compressedState, err := compressState(data); err == nil {
+            payload = compressedState
+        } else {
+            return err
+        }
+    }
+
     kv := c.Client.KV()
     _, err := kv.Put(&consulapi.KVPair{
         Key: c.Path,
-        Value: data,
+        Value: payload,
     }, nil)
     return err
 }

@@ -177,3 +199,31 @@ func (c *RemoteClient) Unlock(id string) error {

     return err
 }
+
+func compressState(data []byte) ([]byte, error) {
+    b := new(bytes.Buffer)
+    gz := gzip.NewWriter(b)
+    if _, err := gz.Write(data); err != nil {
+        return nil, err
+    }
+    if err := gz.Flush(); err != nil {
+        return nil, err
+    }
+    if err := gz.Close(); err != nil {
+        return nil, err
+    }
+    return b.Bytes(), nil
+}
+
+func uncompressState(data []byte) ([]byte, error) {
+    b := new(bytes.Buffer)
+    gz, err := gzip.NewReader(bytes.NewReader(data))
+    if err != nil {
+        return nil, err
+    }
+    b.ReadFrom(gz)
+    if err := gz.Close(); err != nil {
+        return nil, err
+    }
+    return b.Bytes(), nil
+}
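Aside (not part of the diff; the test name is hypothetical): a round-trip sketch of the compressState/uncompressState helpers added above, including a check of the leading 0x1f gzip magic byte that Get() relies on to tell compressed payloads apart from plain JSON.

    package consul

    import (
        "bytes"
        "testing"
    )

    func TestCompressState_roundTrip(t *testing.T) {
        in := []byte(`{"version": 3, "serial": 1}`)

        // compressState gzips the raw state bytes before they are written to Consul.
        gz, err := compressState(in)
        if err != nil {
            t.Fatalf("compress: %s", err)
        }

        // Get() treats a leading 0x1f byte as "this payload is gzip, not JSON".
        if len(gz) == 0 || gz[0] != '\x1f' {
            t.Fatalf("expected gzip magic byte at the start of a %d-byte payload", len(gz))
        }

        // uncompressState must return the original bytes unchanged.
        out, err := uncompressState(gz)
        if err != nil {
            t.Fatalf("uncompress: %s", err)
        }
        if !bytes.Equal(in, out) {
            t.Fatalf("round-trip mismatch: %q != %q", in, out)
        }
    }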
@@ -2,7 +2,6 @@ package consul

 import (
     "fmt"
-    "os"
     "testing"
     "time"

@@ -16,15 +15,12 @@ func TestRemoteClient_impl(t *testing.T) {
 }

 func TestRemoteClient(t *testing.T) {
-    addr := os.Getenv("CONSUL_HTTP_ADDR")
-    if addr == "" {
-        t.Log("consul tests require CONSUL_HTTP_ADDR")
-        t.Skip()
-    }
+    srv := newConsulTestServer(t)
+    defer srv.Stop()

     // Get the backend
     b := backend.TestBackendConfig(t, New(), map[string]interface{}{
-        "address": addr,
+        "address": srv.HTTPAddr,
         "path": fmt.Sprintf("tf-unit/%s", time.Now().String()),
     })

@@ -38,18 +34,54 @@ func TestRemoteClient(t *testing.T) {
     remote.TestClient(t, state.(*remote.State).Client)
 }

-func TestConsul_stateLock(t *testing.T) {
-    addr := os.Getenv("CONSUL_HTTP_ADDR")
-    if addr == "" {
-        t.Log("consul lock tests require CONSUL_HTTP_ADDR")
-        t.Skip()
-    }
+// test the gzip functionality of the client
+func TestRemoteClient_gzipUpgrade(t *testing.T) {
+    srv := newConsulTestServer(t)
+    defer srv.Stop()
+
+    statePath := fmt.Sprintf("tf-unit/%s", time.Now().String())
+
+    // Get the backend
+    b := backend.TestBackendConfig(t, New(), map[string]interface{}{
+        "address": srv.HTTPAddr,
+        "path": statePath,
+    })
+
+    // Grab the client
+    state, err := b.State(backend.DefaultStateName)
+    if err != nil {
+        t.Fatalf("err: %s", err)
+    }
+
+    // Test
+    remote.TestClient(t, state.(*remote.State).Client)
+
+    // create a new backend with gzip
+    b = backend.TestBackendConfig(t, New(), map[string]interface{}{
+        "address": srv.HTTPAddr,
+        "path": statePath,
+        "gzip": true,
+    })
+
+    // Grab the client
+    state, err = b.State(backend.DefaultStateName)
+    if err != nil {
+        t.Fatalf("err: %s", err)
+    }
+
+    // Test
+    remote.TestClient(t, state.(*remote.State).Client)
+}
+
+func TestConsul_stateLock(t *testing.T) {
+    srv := newConsulTestServer(t)
+    defer srv.Stop()

     path := fmt.Sprintf("tf-unit/%s", time.Now().String())

     // create 2 instances to get 2 remote.Clients
     sA, err := backend.TestBackendConfig(t, New(), map[string]interface{}{
-        "address": addr,
+        "address": srv.HTTPAddr,
         "path": path,
     }).State(backend.DefaultStateName)
     if err != nil {

@@ -57,7 +89,7 @@ func TestConsul_stateLock(t *testing.T) {
     }

     sB, err := backend.TestBackendConfig(t, New(), map[string]interface{}{
-        "address": addr,
+        "address": srv.HTTPAddr,
         "path": path,
     }).State(backend.DefaultStateName)
     if err != nil {
@@ -6,6 +6,7 @@ import (
     "testing"

     "github.com/hashicorp/terraform/config"
+    "github.com/hashicorp/terraform/state"
     "github.com/hashicorp/terraform/terraform"
 )

@@ -40,8 +41,15 @@ func TestBackendConfig(t *testing.T, b Backend, c map[string]interface{}) Backen
 // assumed to already be configured. This will test state functionality.
 // If the backend reports it doesn't support multi-state by returning the
 // error ErrNamedStatesNotSupported, then it will not test that.
-func TestBackend(t *testing.T, b Backend) {
-    testBackendStates(t, b)
+//
+// If you want to test locking, two backends must be given. If b2 is nil,
+// then state locking won't be tested.
+func TestBackend(t *testing.T, b1, b2 Backend) {
+    testBackendStates(t, b1)
+
+    if b2 != nil {
+        testBackendStateLock(t, b1, b2)
+    }
 }

 func testBackendStates(t *testing.T, b Backend) {

@@ -82,6 +90,10 @@ func testBackendStates(t *testing.T, b Backend) {
     // Verify they are distinct states
     {
         s := barState.State()
+        if s == nil {
+            s = terraform.NewState()
+        }
+
         s.Lineage = "bar"
         if err := barState.WriteState(s); err != nil {
             t.Fatalf("bad: %s", err)

@@ -93,7 +105,7 @@ func testBackendStates(t *testing.T, b Backend) {
         if err := fooState.RefreshState(); err != nil {
             t.Fatalf("bad: %s", err)
         }
-        if v := fooState.State(); v.Lineage == "bar" {
+        if v := fooState.State(); v != nil && v.Lineage == "bar" {
             t.Fatalf("bad: %#v", v)
         }
     }

@@ -138,3 +150,77 @@ func testBackendStates(t *testing.T, b Backend) {
         }
     }
 }
+
+func testBackendStateLock(t *testing.T, b1, b2 Backend) {
+    // Get the default state for each
+    b1StateMgr, err := b1.State(DefaultStateName)
+    if err != nil {
+        t.Fatalf("error: %s", err)
+    }
+    if err := b1StateMgr.RefreshState(); err != nil {
+        t.Fatalf("bad: %s", err)
+    }
+
+    // Fast exit if this doesn't support locking at all
+    if _, ok := b1StateMgr.(state.Locker); !ok {
+        t.Logf("TestBackend: backend %T doesn't support state locking, not testing", b1)
+        return
+    }
+
+    t.Logf("TestBackend: testing state locking for %T", b1)
+
+    b2StateMgr, err := b2.State(DefaultStateName)
+    if err != nil {
+        t.Fatalf("error: %s", err)
+    }
+    if err := b2StateMgr.RefreshState(); err != nil {
+        t.Fatalf("bad: %s", err)
+    }
+
+    // Reassign so it's obvious what's happening
+    lockerA := b1StateMgr.(state.Locker)
+    lockerB := b2StateMgr.(state.Locker)
+
+    infoA := state.NewLockInfo()
+    infoA.Operation = "test"
+    infoA.Who = "clientA"
+
+    infoB := state.NewLockInfo()
+    infoB.Operation = "test"
+    infoB.Who = "clientB"
+
+    lockIDA, err := lockerA.Lock(infoA)
+    if err != nil {
+        t.Fatal("unable to get initial lock:", err)
+    }
+
+    // If the lock ID is blank, assume locking is disabled
+    if lockIDA == "" {
+        t.Logf("TestBackend: %T: empty string returned for lock, assuming disabled", b1)
+        return
+    }
+
+    _, err = lockerB.Lock(infoB)
+    if err == nil {
+        lockerA.Unlock(lockIDA)
+        t.Fatal("client B obtained lock while held by client A")
+    }
+
+    if err := lockerA.Unlock(lockIDA); err != nil {
+        t.Fatal("error unlocking client A", err)
+    }
+
+    lockIDB, err := lockerB.Lock(infoB)
+    if err != nil {
+        t.Fatal("unable to obtain lock from client B")
+    }
+
+    if lockIDB == lockIDA {
+        t.Fatalf("duplicate lock IDs: %q", lockIDB)
+    }
+
+    if err = lockerB.Unlock(lockIDB); err != nil {
+        t.Fatal("error unlocking client B:", err)
+    }
+
+}
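Aside (not part of the diff): a hypothetical backend package's test sketching how the updated helper is intended to be called. New() and the empty config maps are placeholders; two configured instances exercise the new testBackendStateLock path, while passing nil as the second backend skips the locking checks.

    package mybackend

    import (
        "testing"

        "github.com/hashicorp/terraform/backend"
    )

    func TestMyBackend(t *testing.T) {
        // Two independently configured instances of the same backend are needed
        // so the helper can try to take the same lock from both sides.
        b1 := backend.TestBackendConfig(t, New(), map[string]interface{}{})
        b2 := backend.TestBackendConfig(t, New(), map[string]interface{}{})

        // Exercises state CRUD plus the new lock/unlock checks.
        backend.TestBackend(t, b1, b2)

        // A backend without a second instance can still run the state tests:
        // backend.TestBackend(t, b1, nil)
    }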
@@ -308,7 +308,7 @@ func (c *Config) Client() (interface{}, error) {
     client.kmsconn = kms.New(sess)
     client.lambdaconn = lambda.New(sess)
     client.lightsailconn = lightsail.New(usEast1Sess)
-    client.opsworksconn = opsworks.New(usEast1Sess)
+    client.opsworksconn = opsworks.New(sess)
     client.r53conn = route53.New(usEast1Sess)
     client.rdsconn = rds.New(sess)
     client.redshiftconn = redshift.New(sess)
@@ -20,6 +20,11 @@ func dataSourceAwsDbInstance() *schema.Resource {
         ForceNew: true,
     },

+    "address": {
+        Type: schema.TypeString,
+        Computed: true,
+    },
+
     "allocated_storage": {
         Type: schema.TypeInt,
         Computed: true,

@@ -82,6 +87,11 @@ func dataSourceAwsDbInstance() *schema.Resource {
         Computed: true,
     },

+    "endpoint": {
+        Type: schema.TypeString,
+        Computed: true,
+    },
+
     "engine": {
         Type: schema.TypeString,
         Computed: true,

@@ -92,6 +102,11 @@ func dataSourceAwsDbInstance() *schema.Resource {
         Computed: true,
     },

+    "hosted_zone_id": {
+        Type: schema.TypeString,
+        Computed: true,
+    },
+
     "iops": {
         Type: schema.TypeInt,
         Computed: true,

@@ -133,6 +148,11 @@ func dataSourceAwsDbInstance() *schema.Resource {
         Elem: &schema.Schema{Type: schema.TypeString},
     },

+    "port": {
+        Type: schema.TypeInt,
+        Computed: true,
+    },
+
     "preferred_backup_window": {
         Type: schema.TypeString,
         Computed: true,

@@ -232,6 +252,10 @@ func dataSourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error
     d.Set("master_username", dbInstance.MasterUsername)
     d.Set("monitoring_interval", dbInstance.MonitoringInterval)
     d.Set("monitoring_role_arn", dbInstance.MonitoringRoleArn)
+    d.Set("address", dbInstance.Endpoint.Address)
+    d.Set("port", dbInstance.Endpoint.Port)
+    d.Set("hosted_zone_id", dbInstance.Endpoint.HostedZoneId)
+    d.Set("endpoint", fmt.Sprintf("%s:%d", *dbInstance.Endpoint.Address, *dbInstance.Endpoint.Port))

     var optionGroups []string
     for _, v := range dbInstance.OptionGroupMemberships {
@@ -28,6 +28,25 @@ func TestAccAWSDataDbInstance_basic(t *testing.T) {
     })
 }

+func TestAccAWSDataDbInstance_endpoint(t *testing.T) {
+    rInt := acctest.RandInt()
+    resource.Test(t, resource.TestCase{
+        PreCheck: func() { testAccPreCheck(t) },
+        Providers: testAccProviders,
+        Steps: []resource.TestStep{
+            {
+                Config: testAccAWSDBInstanceConfigWithDataSource(rInt),
+                Check: resource.ComposeAggregateTestCheckFunc(
+                    resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "address"),
+                    resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "port"),
+                    resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "hosted_zone_id"),
+                    resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "endpoint"),
+                ),
+            },
+        },
+    })
+}
+
 func testAccAWSDBInstanceConfigWithDataSource(rInt int) string {
     return fmt.Sprintf(`
 resource "aws_db_instance" "bar" {
@@ -51,7 +51,7 @@ func dataSourceAwsEcsTaskDefinitionRead(d *schema.ResourceData, meta interface{}
     })

     if err != nil {
-        return err
+        return fmt.Errorf("Failed getting task definition %s %q", err, d.Get("task_definition").(string))
     }

     taskDefinition := *desc.TaskDefinition
@@ -335,7 +335,6 @@ resource "aws_instance" "foo" {
     root_block_device {
         volume_type = "gp2"
         volume_size = 11
-        iops = 330
     }
 }

@@ -41,6 +41,16 @@ func dataSourceAwsRouteTable() *schema.Resource {
         Computed: true,
     },

+    "ipv6_cidr_block": {
+        Type: schema.TypeString,
+        Computed: true,
+    },
+
+    "egress_only_gateway_id": {
+        Type: schema.TypeString,
+        Computed: true,
+    },
+
     "gateway_id": {
         Type: schema.TypeString,
         Computed: true,

@@ -177,6 +187,12 @@ func dataSourceRoutesRead(ec2Routes []*ec2.Route) []map[string]interface{} {
     if r.DestinationCidrBlock != nil {
         m["cidr_block"] = *r.DestinationCidrBlock
     }
+    if r.DestinationIpv6CidrBlock != nil {
+        m["ipv6_cidr_block"] = *r.DestinationIpv6CidrBlock
+    }
+    if r.EgressOnlyInternetGatewayId != nil {
+        m["egress_only_gateway_id"] = *r.EgressOnlyInternetGatewayId
+    }
     if r.GatewayId != nil {
         m["gateway_id"] = *r.GatewayId
     }
@@ -14,7 +14,7 @@ func TestAccDataSourceAwsRouteTable_basic(t *testing.T) {
     PreCheck: func() { testAccPreCheck(t) },
     Providers: testAccProviders,
     Steps: []resource.TestStep{
-        resource.TestStep{
+        {
             Config: testAccDataSourceAwsRouteTableGroupConfig,
             Check: resource.ComposeTestCheckFunc(
                 testAccDataSourceAwsRouteTableCheck("data.aws_route_table.by_tag"),

@@ -33,7 +33,7 @@ func TestAccDataSourceAwsRouteTable_main(t *testing.T) {
     PreCheck: func() { testAccPreCheck(t) },
     Providers: testAccProviders,
     Steps: []resource.TestStep{
-        resource.TestStep{
+        {
             Config: testAccDataSourceAwsRouteTableMainRoute,
             Check: resource.ComposeTestCheckFunc(
                 testAccDataSourceAwsRouteTableCheckMain("data.aws_route_table.by_filter"),
@@ -14,19 +14,19 @@ func dataSourceAwsVpc() *schema.Resource {
     Read: dataSourceAwsVpcRead,

     Schema: map[string]*schema.Schema{
-        "cidr_block": &schema.Schema{
+        "cidr_block": {
             Type: schema.TypeString,
             Optional: true,
             Computed: true,
         },

-        "dhcp_options_id": &schema.Schema{
+        "dhcp_options_id": {
             Type: schema.TypeString,
             Optional: true,
             Computed: true,
         },

-        "default": &schema.Schema{
+        "default": {
             Type: schema.TypeBool,
             Optional: true,
             Computed: true,

@@ -34,18 +34,28 @@ func dataSourceAwsVpc() *schema.Resource {

         "filter": ec2CustomFiltersSchema(),

-        "id": &schema.Schema{
+        "id": {
             Type: schema.TypeString,
             Optional: true,
             Computed: true,
         },

-        "instance_tenancy": &schema.Schema{
+        "instance_tenancy": {
             Type: schema.TypeString,
             Computed: true,
         },

-        "state": &schema.Schema{
+        "ipv6_cidr_block": {
+            Type: schema.TypeString,
+            Computed: true,
+        },
+
+        "ipv6_association_id": {
+            Type: schema.TypeString,
+            Computed: true,
+        },
+
+        "state": {
             Type: schema.TypeString,
             Optional: true,
             Computed: true,

@@ -117,5 +127,10 @@ func dataSourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error {
     d.Set("state", vpc.State)
     d.Set("tags", tagsToMap(vpc.Tags))

+    if vpc.Ipv6CidrBlockAssociationSet != nil {
+        d.Set("ipv6_association_id", vpc.Ipv6CidrBlockAssociationSet[0].AssociationId)
+        d.Set("ipv6_cidr_block", vpc.Ipv6CidrBlockAssociationSet[0].Ipv6CidrBlock)
+    }
+
     return nil
 }
@@ -2,31 +2,60 @@ package aws

 import (
 	"fmt"
+	"math/rand"
 	"testing"
+	"time"

 	"github.com/hashicorp/terraform/helper/resource"
 	"github.com/hashicorp/terraform/terraform"
 )

 func TestAccDataSourceAwsVpc_basic(t *testing.T) {
+	rand.Seed(time.Now().UTC().UnixNano())
+	rInt := rand.Intn(16)
+	cidr := fmt.Sprintf("172.%d.0.0/16", rInt)
+	tag := fmt.Sprintf("terraform-testacc-vpc-data-source-%d", rInt)
 	resource.Test(t, resource.TestCase{
 		PreCheck:  func() { testAccPreCheck(t) },
 		Providers: testAccProviders,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
-				Config: testAccDataSourceAwsVpcConfig,
+				Config: testAccDataSourceAwsVpcConfig(cidr, tag),
 				Check: resource.ComposeTestCheckFunc(
-					testAccDataSourceAwsVpcCheck("data.aws_vpc.by_id"),
+					testAccDataSourceAwsVpcCheck("data.aws_vpc.by_id", cidr, tag),
-					testAccDataSourceAwsVpcCheck("data.aws_vpc.by_cidr"),
+					testAccDataSourceAwsVpcCheck("data.aws_vpc.by_cidr", cidr, tag),
-					testAccDataSourceAwsVpcCheck("data.aws_vpc.by_tag"),
+					testAccDataSourceAwsVpcCheck("data.aws_vpc.by_tag", cidr, tag),
-					testAccDataSourceAwsVpcCheck("data.aws_vpc.by_filter"),
+					testAccDataSourceAwsVpcCheck("data.aws_vpc.by_filter", cidr, tag),
 				),
 			},
 		},
 	})
 }

-func testAccDataSourceAwsVpcCheck(name string) resource.TestCheckFunc {
+func TestAccDataSourceAwsVpc_ipv6Associated(t *testing.T) {
+	rand.Seed(time.Now().UTC().UnixNano())
+	rInt := rand.Intn(16)
+	cidr := fmt.Sprintf("172.%d.0.0/16", rInt)
+	tag := fmt.Sprintf("terraform-testacc-vpc-data-source-%d", rInt)
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccDataSourceAwsVpcConfigIpv6(cidr, tag),
+				Check: resource.ComposeTestCheckFunc(
+					testAccDataSourceAwsVpcCheck("data.aws_vpc.by_id", cidr, tag),
+					resource.TestCheckResourceAttrSet(
+						"data.aws_vpc.by_id", "ipv6_association_id"),
+					resource.TestCheckResourceAttrSet(
+						"data.aws_vpc.by_id", "ipv6_cidr_block"),
+				),
+			},
+		},
+	})
+}
+
+func testAccDataSourceAwsVpcCheck(name, cidr, tag string) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
 		rs, ok := s.RootModule().Resources[name]
 		if !ok {
@@ -48,10 +77,10 @@ func testAccDataSourceAwsVpcCheck(name string) resource.TestCheckFunc {
 			)
 		}

-		if attr["cidr_block"] != "172.16.0.0/16" {
+		if attr["cidr_block"] != cidr {
-			return fmt.Errorf("bad cidr_block %s", attr["cidr_block"])
+			return fmt.Errorf("bad cidr_block %s, expected: %s", attr["cidr_block"], cidr)
 		}
-		if attr["tags.Name"] != "terraform-testacc-vpc-data-source" {
+		if attr["tags.Name"] != tag {
 			return fmt.Errorf("bad Name tag %s", attr["tags.Name"])
 		}

@@ -59,16 +88,37 @@ func testAccDataSourceAwsVpcCheck(name string) resource.TestCheckFunc {
 	}
 }

-const testAccDataSourceAwsVpcConfig = `
+func testAccDataSourceAwsVpcConfigIpv6(cidr, tag string) string {
+	return fmt.Sprintf(`
 provider "aws" {
   region = "us-west-2"
 }

 resource "aws_vpc" "test" {
-  cidr_block = "172.16.0.0/16"
+  cidr_block = "%s"
+  assign_generated_ipv6_cidr_block = true

   tags {
-    Name = "terraform-testacc-vpc-data-source"
+    Name = "%s"
+  }
+}
+
+data "aws_vpc" "by_id" {
+  id = "${aws_vpc.test.id}"
+}`, cidr, tag)
+}
+
+func testAccDataSourceAwsVpcConfig(cidr, tag string) string {
+	return fmt.Sprintf(`
+provider "aws" {
+  region = "us-west-2"
+}
+
+resource "aws_vpc" "test" {
+  cidr_block = "%s"
+
+  tags {
+    Name = "%s"
   }
 }

@@ -91,5 +141,5 @@ data "aws_vpc" "by_filter" {
     name = "cidr"
     values = ["${aws_vpc.test.cidr_block}"]
   }
+}`, cidr, tag)
 }
-`
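Editorial note: the rewritten test above derives its CIDR block and Name tag from a random integer so that concurrent acceptance runs no longer collide on a fixed 172.16.0.0/16 network. A minimal standalone sketch of that derivation (the helper name below is illustrative, not part of the provider):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// randomVpcTestParams mirrors the pattern used in the updated test: seed
// math/rand, pick a small integer, and derive a unique CIDR and Name tag.
func randomVpcTestParams() (cidr, tag string) {
	rand.Seed(time.Now().UTC().UnixNano())
	rInt := rand.Intn(16) // yields 172.0.0.0/16 .. 172.15.0.0/16
	cidr = fmt.Sprintf("172.%d.0.0/16", rInt)
	tag = fmt.Sprintf("terraform-testacc-vpc-data-source-%d", rInt)
	return cidr, tag
}

func main() {
	cidr, tag := randomVpcTestParams()
	fmt.Println(cidr, tag) // e.g. "172.7.0.0/16 terraform-testacc-vpc-data-source-7"
}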
@@ -1,6 +1,8 @@
 package aws

 import (
+	"bytes"
+	"encoding/json"
 	"log"
 	"strings"

@@ -42,3 +44,17 @@ func suppressAwsDbEngineVersionDiffs(k, old, new string, d *schema.ResourceData)
 	// Throw a diff by default
 	return false
 }
+
+func suppressEquivalentJsonDiffs(k, old, new string, d *schema.ResourceData) bool {
+	ob := bytes.NewBufferString("")
+	if err := json.Compact(ob, []byte(old)); err != nil {
+		return false
+	}
+
+	nb := bytes.NewBufferString("")
+	if err := json.Compact(nb, []byte(new)); err != nil {
+		return false
+	}
+
+	return jsonBytesEqual(ob.Bytes(), nb.Bytes())
+}
@@ -0,0 +1,31 @@
+package aws
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func TestSuppressEquivalentJsonDiffsWhitespaceAndNoWhitespace(t *testing.T) {
+	d := new(schema.ResourceData)
+
+	noWhitespace := `{"test":"test"}`
+	whitespace := `
+{
+  "test": "test"
+}`
+
+	if !suppressEquivalentJsonDiffs("", noWhitespace, whitespace, d) {
+		t.Errorf("Expected suppressEquivalentJsonDiffs to return true for %s == %s", noWhitespace, whitespace)
+	}
+
+	noWhitespaceDiff := `{"test":"test"}`
+	whitespaceDiff := `
+{
+  "test": "tested"
+}`
+
+	if suppressEquivalentJsonDiffs("", noWhitespaceDiff, whitespaceDiff, d) {
+		t.Errorf("Expected suppressEquivalentJsonDiffs to return false for %s == %s", noWhitespaceDiff, whitespaceDiff)
+	}
+}
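Editorial note: the new suppressEquivalentJsonDiffs helper treats two JSON documents as equal when they differ only in formatting, so a reformatted policy or settings blob no longer shows up as a diff. A standalone sketch of the same idea using only the standard library; the comparison here is a simplification that uses bytes.Equal on the compacted output, whereas the provider delegates to its own jsonBytesEqual helper:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// jsonEquivalent reports whether old and new encode the same JSON text,
// ignoring insignificant whitespace. Invalid JSON is never equivalent.
func jsonEquivalent(old, new string) bool {
	var ob, nb bytes.Buffer
	if err := json.Compact(&ob, []byte(old)); err != nil {
		return false
	}
	if err := json.Compact(&nb, []byte(new)); err != nil {
		return false
	}
	return bytes.Equal(ob.Bytes(), nb.Bytes())
}

func main() {
	fmt.Println(jsonEquivalent(`{"test":"test"}`, "{\n  \"test\": \"test\"\n}")) // true
	fmt.Println(jsonEquivalent(`{"test":"test"}`, `{"test":"tested"}`))          // false
}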
@@ -0,0 +1,31 @@
+package aws
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/acctest"
+	"github.com/hashicorp/terraform/helper/resource"
+)
+
+func TestAccAWSIAMAccountAlias_importBasic(t *testing.T) {
+	resourceName := "aws_iam_account_alias.test"
+
+	rstring := acctest.RandString(5)
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSIAMAccountAliasDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccAWSIAMAccountAliasConfig(rstring),
+			},
+
+			resource.TestStep{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
@@ -51,6 +51,7 @@ func resourceAwsRouteTableImportState(
 		d.SetType("aws_route")
 		d.Set("route_table_id", id)
 		d.Set("destination_cidr_block", route.DestinationCidrBlock)
+		d.Set("destination_ipv6_cidr_block", route.DestinationIpv6CidrBlock)
 		d.SetId(routeIDHash(d, route))
 		results = append(results, d)
 	}
@@ -23,11 +23,11 @@ func TestAccAWSRouteTable_importBasic(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckRouteTableDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccRouteTableConfig,
 			},

-			resource.TestStep{
+			{
 				ResourceName:     "aws_route_table.foo",
 				ImportState:      true,
 				ImportStateCheck: checkFn,
@@ -51,11 +51,11 @@ func TestAccAWSRouteTable_complex(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckRouteTableDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccRouteTableConfig_complexImport,
 			},

-			resource.TestStep{
+			{
 				ResourceName:     "aws_route_table.mod",
 				ImportState:      true,
 				ImportStateCheck: checkFn,
@@ -66,13 +66,20 @@ func resourceAwsSecurityGroupImportStatePerm(sg *ec2.SecurityGroup, ruleType str
 		p := &ec2.IpPermission{
 			FromPort:      perm.FromPort,
 			IpProtocol:    perm.IpProtocol,
-			IpRanges:      perm.IpRanges,
 			PrefixListIds: perm.PrefixListIds,
 			ToPort:        perm.ToPort,

 			UserIdGroupPairs: []*ec2.UserIdGroupPair{pair},
 		}

+		if perm.Ipv6Ranges != nil {
+			p.Ipv6Ranges = perm.Ipv6Ranges
+		}
+
+		if perm.IpRanges != nil {
+			p.IpRanges = perm.IpRanges
+		}
+
 		r, err := resourceAwsSecurityGroupImportStatePermPair(sg, ruleType, p)
 		if err != nil {
 			return nil, err
@@ -23,11 +23,39 @@ func TestAccAWSSecurityGroup_importBasic(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfig,
 			},

-			resource.TestStep{
+			{
+				ResourceName:     "aws_security_group.web",
+				ImportState:      true,
+				ImportStateCheck: checkFn,
+			},
+		},
+	})
+}
+
+func TestAccAWSSecurityGroup_importIpv6(t *testing.T) {
+	checkFn := func(s []*terraform.InstanceState) error {
+		// Expect 3: group, 2 rules
+		if len(s) != 3 {
+			return fmt.Errorf("expected 3 states: %#v", s)
+		}
+
+		return nil
+	}
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSSecurityGroupConfigIpv6,
+			},
+
+			{
 				ResourceName:     "aws_security_group.web",
 				ImportState:      true,
 				ImportStateCheck: checkFn,
@@ -42,11 +70,11 @@ func TestAccAWSSecurityGroup_importSelf(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfig_importSelf,
 			},

-			resource.TestStep{
+			{
 				ResourceName:      "aws_security_group.allow_all",
 				ImportState:       true,
 				ImportStateVerify: true,
@@ -61,11 +89,11 @@ func TestAccAWSSecurityGroup_importSourceSecurityGroup(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfig_importSourceSecurityGroup,
 			},

-			resource.TestStep{
+			{
 				ResourceName:      "aws_security_group.test_group_1",
 				ImportState:       true,
 				ImportStateVerify: true,
@@ -32,7 +32,14 @@ func expandNetworkAclEntries(configured []interface{}, entryType string) ([]*ec2
 			Egress:     aws.Bool(entryType == "egress"),
 			RuleAction: aws.String(data["action"].(string)),
 			RuleNumber: aws.Int64(int64(data["rule_no"].(int))),
-			CidrBlock:  aws.String(data["cidr_block"].(string)),
+		}
+
+		if v, ok := data["ipv6_cidr_block"]; ok {
+			e.Ipv6CidrBlock = aws.String(v.(string))
+		}
+
+		if v, ok := data["cidr_block"]; ok {
+			e.CidrBlock = aws.String(v.(string))
 		}

 		// Specify additional required fields for ICMP
@@ -55,14 +62,24 @@ func flattenNetworkAclEntries(list []*ec2.NetworkAclEntry) []map[string]interfac
 	entries := make([]map[string]interface{}, 0, len(list))

 	for _, entry := range list {
-		entries = append(entries, map[string]interface{}{
+		newEntry := map[string]interface{}{
 			"from_port": *entry.PortRange.From,
 			"to_port":   *entry.PortRange.To,
 			"action":    *entry.RuleAction,
 			"rule_no":   *entry.RuleNumber,
 			"protocol":  *entry.Protocol,
-			"cidr_block": *entry.CidrBlock,
-		})
+		}
+
+		if entry.CidrBlock != nil {
+			newEntry["cidr_block"] = *entry.CidrBlock
+		}
+
+		if entry.Ipv6CidrBlock != nil {
+			newEntry["ipv6_cidr_block"] = *entry.Ipv6CidrBlock
+		}
+
+		entries = append(entries, newEntry)
 	}

 	return entries
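Editorial note: the flatten change above only copies cidr_block or ipv6_cidr_block into the map when the corresponding pointer is set, because an entry now carries one or the other. A minimal sketch of that nil-guard pattern with plain Go pointers; the struct and field names here are illustrative stand-ins, not the EC2 SDK types:

package main

import "fmt"

// aclEntry stands in for the SDK struct: either CIDR pointer may be nil.
type aclEntry struct {
	CidrBlock     *string
	Ipv6CidrBlock *string
}

// flatten copies only the fields that are actually present, so a nil pointer
// is never dereferenced and the map never holds an empty placeholder value.
func flatten(e aclEntry) map[string]interface{} {
	m := map[string]interface{}{}
	if e.CidrBlock != nil {
		m["cidr_block"] = *e.CidrBlock
	}
	if e.Ipv6CidrBlock != nil {
		m["ipv6_cidr_block"] = *e.Ipv6CidrBlock
	}
	return m
}

func main() {
	v6 := "::/0"
	fmt.Println(flatten(aclEntry{Ipv6CidrBlock: &v6})) // map[ipv6_cidr_block:::/0]
}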
@@ -298,6 +298,7 @@ func Provider() terraform.ResourceProvider {
 			"aws_flow_log":                    resourceAwsFlowLog(),
 			"aws_glacier_vault":               resourceAwsGlacierVault(),
 			"aws_iam_access_key":              resourceAwsIamAccessKey(),
+			"aws_iam_account_alias":           resourceAwsIamAccountAlias(),
 			"aws_iam_account_password_policy": resourceAwsIamAccountPasswordPolicy(),
 			"aws_iam_group_policy":            resourceAwsIamGroupPolicy(),
 			"aws_iam_group":                   resourceAwsIamGroup(),
@@ -21,27 +21,34 @@ func resourceAwsApiGatewayDomainName() *schema.Resource {

 		Schema: map[string]*schema.Schema{

+			//According to AWS Documentation, ACM will be the only way to add certificates
+			//to ApiGateway DomainNames. When this happens, we will be deprecating all certificate methods
+			//except certificate_arn. We are not quite sure when this will happen.
 			"certificate_body": {
 				Type:     schema.TypeString,
 				ForceNew: true,
-				Required: true,
+				Optional: true,
+				ConflictsWith: []string{"certificate_arn"},
 			},

 			"certificate_chain": {
 				Type:     schema.TypeString,
 				ForceNew: true,
-				Required: true,
+				Optional: true,
+				ConflictsWith: []string{"certificate_arn"},
 			},

 			"certificate_name": {
 				Type:     schema.TypeString,
-				Required: true,
+				Optional: true,
+				ConflictsWith: []string{"certificate_arn"},
 			},

 			"certificate_private_key": {
 				Type:     schema.TypeString,
 				ForceNew: true,
-				Required: true,
+				Optional: true,
+				ConflictsWith: []string{"certificate_arn"},
 			},

 			"domain_name": {
@@ -50,6 +57,12 @@ func resourceAwsApiGatewayDomainName() *schema.Resource {
 				ForceNew: true,
 			},

+			"certificate_arn": {
+				Type:          schema.TypeString,
+				Optional:      true,
+				ConflictsWith: []string{"certificate_body", "certificate_chain", "certificate_name", "certificate_private_key"},
+			},
+
 			"cloudfront_domain_name": {
 				Type:     schema.TypeString,
 				Computed: true,
@@ -72,13 +85,31 @@ func resourceAwsApiGatewayDomainNameCreate(d *schema.ResourceData, meta interfac
 	conn := meta.(*AWSClient).apigateway
 	log.Printf("[DEBUG] Creating API Gateway Domain Name")

-	domainName, err := conn.CreateDomainName(&apigateway.CreateDomainNameInput{
+	params := &apigateway.CreateDomainNameInput{
-		CertificateBody:       aws.String(d.Get("certificate_body").(string)),
-		CertificateChain:      aws.String(d.Get("certificate_chain").(string)),
-		CertificateName:       aws.String(d.Get("certificate_name").(string)),
-		CertificatePrivateKey: aws.String(d.Get("certificate_private_key").(string)),
 		DomainName: aws.String(d.Get("domain_name").(string)),
-	})
+	}
+
+	if v, ok := d.GetOk("certificate_arn"); ok {
+		params.CertificateArn = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("certificate_name"); ok {
+		params.CertificateName = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("certificate_body"); ok {
+		params.CertificateBody = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("certificate_chain"); ok {
+		params.CertificateChain = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("certificate_private_key"); ok {
+		params.CertificatePrivateKey = aws.String(v.(string))
+	}
+
+	domainName, err := conn.CreateDomainName(params)
 	if err != nil {
 		return fmt.Errorf("Error creating API Gateway Domain Name: %s", err)
 	}
@@ -113,6 +144,7 @@ func resourceAwsApiGatewayDomainNameRead(d *schema.ResourceData, meta interface{
 	}
 	d.Set("cloudfront_domain_name", domainName.DistributionDomainName)
 	d.Set("domain_name", domainName.DomainName)
+	d.Set("certificate_arn", domainName.CertificateArn)

 	return nil
 }
@@ -128,6 +160,14 @@ func resourceAwsApiGatewayDomainNameUpdateOperations(d *schema.ResourceData) []*
 		})
 	}

+	if d.HasChange("certificate_arn") {
+		operations = append(operations, &apigateway.PatchOperation{
+			Op:    aws.String("replace"),
+			Path:  aws.String("/certificateArn"),
+			Value: aws.String(d.Get("certificate_arn").(string)),
+		})
+	}
+
 	return operations
 }

@@ -139,6 +179,7 @@ func resourceAwsApiGatewayDomainNameUpdate(d *schema.ResourceData, meta interfac
 		DomainName:      aws.String(d.Id()),
 		PatchOperations: resourceAwsApiGatewayDomainNameUpdateOperations(d),
 	})
+
 	if err != nil {
 		return err
 	}
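Editorial note: the domain name resource now models certificate_arn and the four manual certificate fields as mutually exclusive optional arguments via ConflictsWith, and only sends the arguments that are actually set (d.GetOk). A reduced sketch of that schema shape, assuming the same helper/schema import the provider already uses; the field list is trimmed to two entries for brevity:

package aws

import "github.com/hashicorp/terraform/helper/schema"

// exampleCertificateSchema shows the mutually exclusive pairing used by the
// API Gateway domain name resource: either an ACM ARN or an inline body,
// never both in the same configuration.
func exampleCertificateSchema() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"certificate_arn": {
			Type:          schema.TypeString,
			Optional:      true,
			ConflictsWith: []string{"certificate_body"},
		},
		"certificate_body": {
			Type:          schema.TypeString,
			Optional:      true,
			ForceNew:      true,
			ConflictsWith: []string{"certificate_arn"},
		},
	}
}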
@@ -472,13 +472,15 @@ func TestAccAWSAutoScalingGroup_ALB_TargetGroups_ELBCapacity(t *testing.T) {
 	var group autoscaling.Group
 	var tg elbv2.TargetGroup

+	rInt := acctest.RandInt()
+
 	resource.Test(t, resource.TestCase{
 		PreCheck:     func() { testAccPreCheck(t) },
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
 		Steps: []resource.TestStep{
 			resource.TestStep{
-				Config: testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity,
+				Config: testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity(rInt),
 				Check: resource.ComposeAggregateTestCheckFunc(
 					testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
 					testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &tg),
@@ -1386,7 +1388,8 @@ resource "aws_autoscaling_group" "bar" {
 `, name)
 }

-const testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity = `
+func testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity(rInt int) string {
+	return fmt.Sprintf(`
 provider "aws" {
   region = "us-west-2"
 }
@@ -1420,7 +1423,7 @@ resource "aws_alb_listener" "test_listener" {
 }

 resource "aws_alb_target_group" "test" {
-  name = "tf-example-alb-tg"
+  name = "tf-alb-test-%d"
   port = 80
   protocol = "HTTP"
   vpc_id = "${aws_vpc.default.id}"
@@ -1431,6 +1434,10 @@ resource "aws_alb_target_group" "test" {
     timeout = "2"
     interval = "5"
   }
+
+  tags {
+    Name = "testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity"
+  }
 }

 resource "aws_subnet" "main" {
@@ -1522,8 +1529,8 @@ resource "aws_autoscaling_group" "bar" {
   force_delete = true
   termination_policies = ["OldestInstance"]
   launch_configuration = "${aws_launch_configuration.foobar.name}"
+}`, rInt)
 }
-`

 func testAccAWSAutoScalingGroupConfigWithSuspendedProcesses(name string) string {
 	return fmt.Sprintf(`
@@ -164,6 +164,13 @@ func resourceAwsCodeBuildProject() *schema.Resource {
 				Type:         schema.TypeInt,
 				Optional:     true,
 				ValidateFunc: validateAwsCodeBuildTimeout,
+				Removed:      "This field has been removed. Please use build_timeout instead",
+			},
+			"build_timeout": {
+				Type:         schema.TypeInt,
+				Optional:     true,
+				Default:      "60",
+				ValidateFunc: validateAwsCodeBuildTimeout,
 			},
 			"tags": tagsSchema(),
 		},
@@ -196,7 +203,7 @@ func resourceAwsCodeBuildProjectCreate(d *schema.ResourceData, meta interface{})
 		params.ServiceRole = aws.String(v.(string))
 	}

-	if v, ok := d.GetOk("timeout"); ok {
+	if v, ok := d.GetOk("build_timeout"); ok {
 		params.TimeoutInMinutes = aws.Int64(int64(v.(int)))
 	}

@@ -373,7 +380,7 @@ func resourceAwsCodeBuildProjectRead(d *schema.ResourceData, meta interface{}) e
 	d.Set("encryption_key", project.EncryptionKey)
 	d.Set("name", project.Name)
 	d.Set("service_role", project.ServiceRole)
-	d.Set("timeout", project.TimeoutInMinutes)
+	d.Set("build_timeout", project.TimeoutInMinutes)

 	if err := d.Set("tags", tagsToMapCodeBuild(project.Tags)); err != nil {
 		return err
@@ -416,8 +423,8 @@ func resourceAwsCodeBuildProjectUpdate(d *schema.ResourceData, meta interface{})
 		params.ServiceRole = aws.String(d.Get("service_role").(string))
 	}

-	if d.HasChange("timeout") {
+	if d.HasChange("build_timeout") {
-		params.TimeoutInMinutes = aws.Int64(int64(d.Get("timeout").(int)))
+		params.TimeoutInMinutes = aws.Int64(int64(d.Get("build_timeout").(int)))
 	}

 	// The documentation clearly says "The replacement set of tags for this build project."
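Editorial note: the rename from timeout to build_timeout keeps the retired attribute in the schema with Removed set, so configurations that still reference it fail with an explicit message instead of being silently ignored, while the new attribute carries the 60-minute default. A reduced sketch of that pairing (validation funcs omitted; the default here is written as an int for clarity):

package aws

import "github.com/hashicorp/terraform/helper/schema"

// exampleRenamedTimeout keeps the old attribute purely to emit a helpful
// error and moves the real behaviour to the new name with a default value.
func exampleRenamedTimeout() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"timeout": {
			Type:     schema.TypeInt,
			Optional: true,
			Removed:  "This field has been removed. Please use build_timeout instead",
		},
		"build_timeout": {
			Type:     schema.TypeInt,
			Optional: true,
			Default:  60,
		},
	}
}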
@@ -0,0 +1,36 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"strings"
+
+	"github.com/hashicorp/terraform/terraform"
+)
+
+func resourceAwsCodebuildMigrateState(
+	v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
+	switch v {
+	case 0:
+		log.Println("[INFO] Found AWS Codebuild State v0; migrating to v1")
+		return migrateCodebuildStateV0toV1(is)
+	default:
+		return is, fmt.Errorf("Unexpected schema version: %d", v)
+	}
+}
+
+func migrateCodebuildStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
+	if is.Empty() {
+		log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
+		return is, nil
+	}
+
+	log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes)
+
+	if is.Attributes["timeout"] != "" {
+		is.Attributes["build_timeout"] = strings.TrimSpace(is.Attributes["timeout"])
+	}
+
+	log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
+	return is, nil
+}
@@ -0,0 +1,53 @@
+package aws
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform/terraform"
+)
+
+func TestAWSCodebuildMigrateState(t *testing.T) {
+	cases := map[string]struct {
+		StateVersion int
+		ID           string
+		Attributes   map[string]string
+		Expected     string
+		Meta         interface{}
+	}{
+		"v0_1": {
+			StateVersion: 0,
+			ID:           "tf-testing-file",
+			Attributes: map[string]string{
+				"description": "some description",
+				"timeout":     "5",
+			},
+			Expected: "5",
+		},
+		"v0_2": {
+			StateVersion: 0,
+			ID:           "tf-testing-file",
+			Attributes: map[string]string{
+				"description":   "some description",
+				"build_timeout": "5",
+			},
+			Expected: "5",
+		},
+	}
+
+	for tn, tc := range cases {
+		is := &terraform.InstanceState{
+			ID:         tc.ID,
+			Attributes: tc.Attributes,
+		}
+		is, err := resourceAwsCodebuildMigrateState(
+			tc.StateVersion, is, tc.Meta)
+
+		if err != nil {
+			t.Fatalf("bad: %s, err: %#v", tn, err)
+		}
+
+		if is.Attributes["build_timeout"] != tc.Expected {
+			t.Fatalf("Bad build_timeout migration: %s\n\n expected: %s", is.Attributes["build_timeout"], tc.Expected)
+		}
+	}
+}
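Editorial note: for the migration above to run, the resource has to advertise a schema version and point at the migration function. A minimal sketch of how that wiring typically looks on a helper/schema resource, assuming the SchemaVersion and MigrateState fields of schema.Resource; the CRUD functions are stubbed here, whereas the real resource supplies its own:

package aws

import "github.com/hashicorp/terraform/helper/schema"

// exampleResourceWithMigration shows how a state migration is attached:
// bumping SchemaVersion to 1 makes Terraform invoke MigrateState for any
// state that was written at version 0 (where timeout still existed).
func exampleResourceWithMigration() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,
		MigrateState:  resourceAwsCodebuildMigrateState,

		Create: func(d *schema.ResourceData, meta interface{}) error { return nil },
		Read:   func(d *schema.ResourceData, meta interface{}) error { return nil },
		Delete: func(d *schema.ResourceData, meta interface{}) error { return nil },

		Schema: map[string]*schema.Schema{},
	}
}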
@@ -25,19 +25,51 @@ func TestAccAWSCodeBuildProject_basic(t *testing.T) {
 				Config: testAccAWSCodeBuildProjectConfig_basic(name),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSCodeBuildProjectExists("aws_codebuild_project.foo"),
+					resource.TestCheckResourceAttr(
+						"aws_codebuild_project.foo", "build_timeout", "5"),
 				),
 			},
 			{
 				Config: testAccAWSCodeBuildProjectConfig_basicUpdated(name),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSCodeBuildProjectExists("aws_codebuild_project.foo"),
+					resource.TestCheckResourceAttr(
+						"aws_codebuild_project.foo", "build_timeout", "50"),
 				),
 			},
 		},
 	})
 }

-func TestAccAWSCodeBuildProject_artifactsTypeValidation(t *testing.T) {
+func TestAccAWSCodeBuildProject_default_build_timeout(t *testing.T) {
+	name := acctest.RandString(10)
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSCodeBuildProjectDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSCodeBuildProjectConfig_default_timeout(name),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSCodeBuildProjectExists("aws_codebuild_project.foo"),
+					resource.TestCheckResourceAttr(
+						"aws_codebuild_project.foo", "build_timeout", "60"),
+				),
+			},
+			{
+				Config: testAccAWSCodeBuildProjectConfig_basicUpdated(name),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSCodeBuildProjectExists("aws_codebuild_project.foo"),
+					resource.TestCheckResourceAttr(
+						"aws_codebuild_project.foo", "build_timeout", "50"),
+				),
+			},
+		},
+	})
+}
+
+func TestAWSCodeBuildProject_artifactsTypeValidation(t *testing.T) {
 	cases := []struct {
 		Value    string
 		ErrCount int
@@ -57,7 +89,7 @@ func TestAccAWSCodeBuildProject_artifactsTypeValidation(t *testing.T) {
 	}
 }

-func TestAccAWSCodeBuildProject_artifactsNamespaceTypeValidation(t *testing.T) {
+func TestAWSCodeBuildProject_artifactsNamespaceTypeValidation(t *testing.T) {
 	cases := []struct {
 		Value    string
 		ErrCount int
@@ -94,7 +126,7 @@ func longTestData() string {
 	}, data)
 }

-func TestAccAWSCodeBuildProject_nameValidation(t *testing.T) {
+func TestAWSCodeBuildProject_nameValidation(t *testing.T) {
 	cases := []struct {
 		Value    string
 		ErrCount int
@@ -115,7 +147,7 @@ func TestAccAWSCodeBuildProject_nameValidation(t *testing.T) {
 	}
 }

-func TestAccAWSCodeBuildProject_descriptionValidation(t *testing.T) {
+func TestAWSCodeBuildProject_descriptionValidation(t *testing.T) {
 	cases := []struct {
 		Value    string
 		ErrCount int
@@ -133,7 +165,7 @@ func TestAccAWSCodeBuildProject_descriptionValidation(t *testing.T) {
 	}
 }

-func TestAccAWSCodeBuildProject_environmentComputeTypeValidation(t *testing.T) {
+func TestAWSCodeBuildProject_environmentComputeTypeValidation(t *testing.T) {
 	cases := []struct {
 		Value    string
 		ErrCount int
@@ -153,7 +185,7 @@ func TestAccAWSCodeBuildProject_environmentComputeTypeValidation(t *testing.T) {
 	}
 }

-func TestAccAWSCodeBuildProject_environmentTypeValidation(t *testing.T) {
+func TestAWSCodeBuildProject_environmentTypeValidation(t *testing.T) {
 	cases := []struct {
 		Value    string
 		ErrCount int
@@ -171,7 +203,7 @@ func TestAccAWSCodeBuildProject_environmentTypeValidation(t *testing.T) {
 	}
 }

-func TestAccAWSCodeBuildProject_sourceTypeValidation(t *testing.T) {
+func TestAWSCodeBuildProject_sourceTypeValidation(t *testing.T) {
 	cases := []struct {
 		Value    string
 		ErrCount int
@@ -192,7 +224,7 @@ func TestAccAWSCodeBuildProject_sourceTypeValidation(t *testing.T) {
 	}
 }

-func TestAccAWSCodeBuildProject_sourceAuthTypeValidation(t *testing.T) {
+func TestAWSCodeBuildProject_sourceAuthTypeValidation(t *testing.T) {
 	cases := []struct {
 		Value    string
 		ErrCount int
@@ -210,7 +242,7 @@ func TestAccAWSCodeBuildProject_sourceAuthTypeValidation(t *testing.T) {
 	}
 }

-func TestAccAWSCodeBuildProject_timeoutValidation(t *testing.T) {
+func TestAWSCodeBuildProject_timeoutValidation(t *testing.T) {
 	cases := []struct {
 		Value    int
 		ErrCount int
@@ -342,7 +374,7 @@ resource "aws_iam_policy_attachment" "codebuild_policy_attachment" {
 resource "aws_codebuild_project" "foo" {
   name = "test-project-%s"
   description = "test_codebuild_project"
-  timeout = "5"
+  build_timeout = "5"
   service_role = "${aws_iam_role.codebuild_role.arn}"

   artifacts {
@@ -429,7 +461,94 @@ resource "aws_iam_policy_attachment" "codebuild_policy_attachment" {
 resource "aws_codebuild_project" "foo" {
   name = "test-project-%s"
   description = "test_codebuild_project"
-  timeout = "5"
+  build_timeout = "50"
+  service_role = "${aws_iam_role.codebuild_role.arn}"
+
+  artifacts {
+    type = "NO_ARTIFACTS"
+  }
+
+  environment {
+    compute_type = "BUILD_GENERAL1_SMALL"
+    image = "2"
+    type = "LINUX_CONTAINER"
+
+    environment_variable = {
+      "name" = "SOME_OTHERKEY"
+      "value" = "SOME_OTHERVALUE"
+    }
+  }
+
+  source {
+    auth {
+      type = "OAUTH"
+    }
+
+    type = "GITHUB"
+    location = "https://github.com/mitchellh/packer.git"
+  }
+
+  tags {
+    "Environment" = "Test"
+  }
+}
+`, rName, rName, rName, rName)
+}
+
+func testAccAWSCodeBuildProjectConfig_default_timeout(rName string) string {
+	return fmt.Sprintf(`
+resource "aws_iam_role" "codebuild_role" {
+  name = "codebuild-role-%s"
+  assume_role_policy = <<EOF
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Service": "codebuild.amazonaws.com"
+      },
+      "Action": "sts:AssumeRole"
+    }
+  ]
+}
+EOF
+}
+
+resource "aws_iam_policy" "codebuild_policy" {
+  name = "codebuild-policy-%s"
+  path = "/service-role/"
+  description = "Policy used in trust relationship with CodeBuild"
+  policy = <<POLICY
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Resource": [
+        "*"
+      ],
+      "Action": [
+        "logs:CreateLogGroup",
+        "logs:CreateLogStream",
+        "logs:PutLogEvents"
+      ]
+    }
+  ]
+}
+POLICY
+}
+
+resource "aws_iam_policy_attachment" "codebuild_policy_attachment" {
+  name = "codebuild-policy-attachment-%s"
+  policy_arn = "${aws_iam_policy.codebuild_policy.arn}"
+  roles = ["${aws_iam_role.codebuild_role.id}"]
+}
+
+resource "aws_codebuild_project" "foo" {
+  name = "test-project-%s"
+  description = "test_codebuild_project"
+
   service_role = "${aws_iam_role.codebuild_role.arn}"

   artifacts {
@@ -25,19 +25,19 @@ func resourceAwsCustomerGateway() *schema.Resource {
 		},

 		Schema: map[string]*schema.Schema{
-			"bgp_asn": &schema.Schema{
+			"bgp_asn": {
 				Type:     schema.TypeInt,
 				Required: true,
 				ForceNew: true,
 			},

-			"ip_address": &schema.Schema{
+			"ip_address": {
 				Type:     schema.TypeString,
 				Required: true,
 				ForceNew: true,
 			},

-			"type": &schema.Schema{
+			"type": {
 				Type:     schema.TypeString,
 				Required: true,
 				ForceNew: true,
@@ -51,10 +51,23 @@ func resourceAwsCustomerGateway() *schema.Resource {
 func resourceAwsCustomerGatewayCreate(d *schema.ResourceData, meta interface{}) error {
 	conn := meta.(*AWSClient).ec2conn

+	ipAddress := d.Get("ip_address").(string)
+	vpnType := d.Get("type").(string)
+	bgpAsn := d.Get("bgp_asn").(int)
+
+	alreadyExists, err := resourceAwsCustomerGatewayExists(vpnType, ipAddress, bgpAsn, conn)
+	if err != nil {
+		return err
+	}
+
+	if alreadyExists {
+		return fmt.Errorf("An existing customer gateway for IpAddress: %s, VpnType: %s, BGP ASN: %d has been found", ipAddress, vpnType, bgpAsn)
+	}
+
 	createOpts := &ec2.CreateCustomerGatewayInput{
-		BgpAsn:   aws.Int64(int64(d.Get("bgp_asn").(int))),
+		BgpAsn:   aws.Int64(int64(bgpAsn)),
-		PublicIp: aws.String(d.Get("ip_address").(string)),
+		PublicIp: aws.String(ipAddress),
-		Type:     aws.String(d.Get("type").(string)),
+		Type:     aws.String(vpnType),
 	}

 	// Create the Customer Gateway.
@@ -123,6 +136,37 @@ func customerGatewayRefreshFunc(conn *ec2.EC2, gatewayId string) resource.StateR
 	}
 }

+func resourceAwsCustomerGatewayExists(vpnType, ipAddress string, bgpAsn int, conn *ec2.EC2) (bool, error) {
+	ipAddressFilter := &ec2.Filter{
+		Name:   aws.String("ip-address"),
+		Values: []*string{aws.String(ipAddress)},
+	}
+
+	typeFilter := &ec2.Filter{
+		Name:   aws.String("type"),
+		Values: []*string{aws.String(vpnType)},
+	}
+
+	bgp := strconv.Itoa(bgpAsn)
+	bgpAsnFilter := &ec2.Filter{
+		Name:   aws.String("bgp-asn"),
+		Values: []*string{aws.String(bgp)},
+	}
+
+	resp, err := conn.DescribeCustomerGateways(&ec2.DescribeCustomerGatewaysInput{
+		Filters: []*ec2.Filter{ipAddressFilter, typeFilter, bgpAsnFilter},
+	})
+	if err != nil {
+		return false, err
+	}
+
+	if len(resp.CustomerGateways) > 0 && *resp.CustomerGateways[0].State != "deleted" {
+		return true, nil
+	}
+
+	return false, nil
+}
+
 func resourceAwsCustomerGatewayRead(d *schema.ResourceData, meta interface{}) error {
 	conn := meta.(*AWSClient).ec2conn

@@ -2,6 +2,7 @@ package aws

 import (
 	"fmt"
+	"regexp"
 	"testing"
 	"time"

@@ -21,19 +22,19 @@ func TestAccAWSCustomerGateway_basic(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckCustomerGatewayDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccCustomerGatewayConfig,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway),
 				),
 			},
-			resource.TestStep{
+			{
 				Config: testAccCustomerGatewayConfigUpdateTags,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway),
 				),
 			},
-			resource.TestStep{
+			{
 				Config: testAccCustomerGatewayConfigForceReplace,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway),
@@ -43,6 +44,28 @@ func TestAccAWSCustomerGateway_basic(t *testing.T) {
 	})
 }

+func TestAccAWSCustomerGateway_similarAlreadyExists(t *testing.T) {
+	var gateway ec2.CustomerGateway
+	resource.Test(t, resource.TestCase{
+		PreCheck:      func() { testAccPreCheck(t) },
+		IDRefreshName: "aws_customer_gateway.foo",
+		Providers:     testAccProviders,
+		CheckDestroy:  testAccCheckCustomerGatewayDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccCustomerGatewayConfig,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway),
+				),
+			},
+			{
+				Config:      testAccCustomerGatewayConfigIdentical,
+				ExpectError: regexp.MustCompile("An existing customer gateway"),
+			},
+		},
+	})
+}
+
 func TestAccAWSCustomerGateway_disappears(t *testing.T) {
 	var gateway ec2.CustomerGateway
 	resource.Test(t, resource.TestCase{
@@ -50,7 +73,7 @@ func TestAccAWSCustomerGateway_disappears(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckCustomerGatewayDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccCustomerGatewayConfig,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway),
@@ -178,6 +201,26 @@ resource "aws_customer_gateway" "foo" {
 }
 `

+const testAccCustomerGatewayConfigIdentical = `
+resource "aws_customer_gateway" "foo" {
+  bgp_asn = 65000
+  ip_address = "172.0.0.1"
+  type = "ipsec.1"
+  tags {
+    Name = "foo-gateway"
+  }
+}
+
+resource "aws_customer_gateway" "identical" {
+  bgp_asn = 65000
+  ip_address = "172.0.0.1"
+  type = "ipsec.1"
+  tags {
+    Name = "foo-gateway-identical"
+  }
+}
+`
+
 // Add the Another: "tag" tag.
 const testAccCustomerGatewayConfigUpdateTags = `
 resource "aws_customer_gateway" "foo" {
@@ -839,6 +839,10 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error
 	}
 	d.SetPartial("apply_immediately")

+	if !d.Get("apply_immediately").(bool) {
+		log.Println("[INFO] Only settings updating, instance changes will be applied in next maintenance window")
+	}
+
 	requestUpdate := false
 	if d.HasChange("allocated_storage") || d.HasChange("iops") {
 		d.SetPartial("allocated_storage")
@@ -622,6 +622,10 @@ resource "aws_db_instance" "bar" {
   backup_retention_period = 0

   parameter_group_name = "default.mysql5.6"
+
+  timeouts {
+    create = "30m"
+  }
 }`

 var testAccAWSDBInstanceConfigKmsKeyId = `
@@ -17,56 +17,66 @@ func resourceAwsDefaultRouteTable() *schema.Resource {
 		Delete: resourceAwsDefaultRouteTableDelete,

 		Schema: map[string]*schema.Schema{
-			"default_route_table_id": &schema.Schema{
+			"default_route_table_id": {
 				Type:     schema.TypeString,
 				Required: true,
 				ForceNew: true,
 			},

-			"vpc_id": &schema.Schema{
+			"vpc_id": {
 				Type:     schema.TypeString,
 				Computed: true,
 			},

-			"propagating_vgws": &schema.Schema{
+			"propagating_vgws": {
 				Type:     schema.TypeSet,
 				Optional: true,
 				Elem:     &schema.Schema{Type: schema.TypeString},
 				Set:      schema.HashString,
 			},

-			"route": &schema.Schema{
+			"route": {
 				Type:     schema.TypeSet,
 				Computed: true,
 				Optional: true,
 				Elem: &schema.Resource{
 					Schema: map[string]*schema.Schema{
-						"cidr_block": &schema.Schema{
+						"cidr_block": {
-							Type:     schema.TypeString,
-							Required: true,
-						},
-
-						"gateway_id": &schema.Schema{
 							Type:     schema.TypeString,
 							Optional: true,
 						},

-						"instance_id": &schema.Schema{
+						"ipv6_cidr_block": {
 							Type:     schema.TypeString,
 							Optional: true,
 						},

-						"nat_gateway_id": &schema.Schema{
+						"egress_only_gateway_id": {
 							Type:     schema.TypeString,
 							Optional: true,
 						},

-						"vpc_peering_connection_id": &schema.Schema{
+						"gateway_id": {
 							Type:     schema.TypeString,
 							Optional: true,
 						},

-						"network_interface_id": &schema.Schema{
+						"instance_id": {
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+
+						"nat_gateway_id": {
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+
+						"vpc_peering_connection_id": {
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+
+						"network_interface_id": {
 							Type:     schema.TypeString,
 							Optional: true,
 						},
@@ -193,6 +203,8 @@ func revokeAllRouteTableRules(defaultRouteTableId string, meta interface{}) erro
 			// See aws_vpc_endpoint
 			continue
 		}
+
+		if r.DestinationCidrBlock != nil {
 			log.Printf(
 				"[INFO] Deleting route from %s: %s",
 				defaultRouteTableId, *r.DestinationCidrBlock)
@@ -205,5 +217,20 @@ func revokeAllRouteTableRules(defaultRouteTableId string, meta interface{}) erro
 			}
 		}

+		if r.DestinationIpv6CidrBlock != nil {
+			log.Printf(
+				"[INFO] Deleting route from %s: %s",
+				defaultRouteTableId, *r.DestinationIpv6CidrBlock)
+			_, err := conn.DeleteRoute(&ec2.DeleteRouteInput{
+				RouteTableId:             aws.String(defaultRouteTableId),
+				DestinationIpv6CidrBlock: r.DestinationIpv6CidrBlock,
+			})
+			if err != nil {
+				return err
+			}
+		}
+
+	}
+
 	return nil
 }
@@ -20,7 +20,7 @@ func TestAccAWSDefaultRouteTable_basic(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckDefaultRouteTableDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccDefaultRouteTableConfig,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckRouteTableExists(
@@ -40,7 +40,7 @@ func TestAccAWSDefaultRouteTable_swap(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckDefaultRouteTableDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccDefaultRouteTable_change,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckRouteTableExists(
@@ -53,7 +53,7 @@ func TestAccAWSDefaultRouteTable_swap(t *testing.T) {
 			// behavior that may happen, in which case a follow up plan will show (in
 			// this case) a diff as the table now needs to be updated to match the
 			// config
-			resource.TestStep{
+			{
 				Config: testAccDefaultRouteTable_change_mod,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckRouteTableExists(
@@ -74,7 +74,7 @@ func TestAccAWSDefaultRouteTable_vpc_endpoint(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckDefaultRouteTableDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccDefaultRouteTable_vpc_endpoint,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckRouteTableExists(
@@ -27,7 +27,7 @@ func resourceAwsDmsReplicationTask() *schema.Resource {

     Schema: map[string]*schema.Schema{
       "cdc_start_time": {
-        Type:     schema.TypeInt,
+        Type:     schema.TypeString,
         Optional: true,
         // Requires a Unix timestamp in seconds. Example 1484346880
       },

@@ -60,6 +60,7 @@ func resourceAwsDmsReplicationTask() *schema.Resource {
         Type:         schema.TypeString,
         Optional:     true,
         ValidateFunc: validateJsonString,
+        DiffSuppressFunc: suppressEquivalentJsonDiffs,
       },
       "source_endpoint_arn": {
         Type:         schema.TypeString,

@@ -71,6 +72,7 @@ func resourceAwsDmsReplicationTask() *schema.Resource {
         Type:         schema.TypeString,
         Required:     true,
         ValidateFunc: validateJsonString,
+        DiffSuppressFunc: suppressEquivalentJsonDiffs,
       },
       "tags": {
         Type: schema.TypeMap,
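Editor's note: suppressEquivalentJsonDiffs is referenced but not shown in this diff. A minimal sketch of such a helper (name, placement, and implementation assumed here, not the provider's actual code) would decode both attribute values and compare the resulting documents, so that key order or whitespace differences in the JSON settings no longer produce a spurious plan diff:

package aws

import (
	"encoding/json"
	"reflect"

	"github.com/hashicorp/terraform/helper/schema"
)

// Hypothetical sketch of a JSON-equivalence DiffSuppressFunc: two values are
// treated as equal when they unmarshal to the same document, so reordered
// keys or reformatted JSON does not force an update.
func suppressEquivalentJsonDiffs(k, old, new string, d *schema.ResourceData) bool {
	var oldDoc, newDoc interface{}
	if err := json.Unmarshal([]byte(old), &oldDoc); err != nil {
		return false
	}
	if err := json.Unmarshal([]byte(new), &newDoc); err != nil {
		return false
	}
	return reflect.DeepEqual(oldDoc, newDoc)
}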
@@ -70,11 +70,13 @@ func resourceAwsEcsTaskDefinition() *schema.Resource {
           "name": {
             Type:     schema.TypeString,
             Required: true,
+            ForceNew: true,
           },

           "host_path": {
             Type:     schema.TypeString,
             Optional: true,
+            ForceNew: true,
           },
         },
       },
@@ -135,6 +135,41 @@ func TestAccAWSEcsTaskDefinition_constraint(t *testing.T) {
   })
 }

+func TestAccAWSEcsTaskDefinition_changeVolumesForcesNewResource(t *testing.T) {
+  var before ecs.TaskDefinition
+  var after ecs.TaskDefinition
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckAWSEcsTaskDefinitionDestroy,
+    Steps: []resource.TestStep{
+      {
+        Config: testAccAWSEcsTaskDefinition,
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.jenkins", &before),
+        ),
+      },
+      {
+        Config: testAccAWSEcsTaskDefinitionUpdatedVolume,
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.jenkins", &after),
+          testAccCheckEcsTaskDefinitionRecreated(t, &before, &after),
+        ),
+      },
+    },
+  })
+}
+
+func testAccCheckEcsTaskDefinitionRecreated(t *testing.T,
+  before, after *ecs.TaskDefinition) resource.TestCheckFunc {
+  return func(s *terraform.State) error {
+    if *before.Revision == *after.Revision {
+      t.Fatalf("Expected change of TaskDefinition Revisions, but both were %v", before.Revision)
+    }
+    return nil
+  }
+}
+
 func testAccCheckAWSTaskDefinitionConstraintsAttrs(def *ecs.TaskDefinition) resource.TestCheckFunc {
   return func(s *terraform.State) error {
     if len(def.PlacementConstraints) != 1 {

@@ -319,6 +354,55 @@ TASK_DEFINITION
 }
 `

+var testAccAWSEcsTaskDefinitionUpdatedVolume = `
+resource "aws_ecs_task_definition" "jenkins" {
+  family = "terraform-acc-test"
+  container_definitions = <<TASK_DEFINITION
+[
+  {
+    "cpu": 10,
+    "command": ["sleep", "10"],
+    "entryPoint": ["/"],
+    "environment": [
+      {"name": "VARNAME", "value": "VARVAL"}
+    ],
+    "essential": true,
+    "image": "jenkins",
+    "links": ["mongodb"],
+    "memory": 128,
+    "name": "jenkins",
+    "portMappings": [
+      {
+        "containerPort": 80,
+        "hostPort": 8080
+      }
+    ]
+  },
+  {
+    "cpu": 10,
+    "command": ["sleep", "10"],
+    "entryPoint": ["/"],
+    "essential": true,
+    "image": "mongodb",
+    "memory": 128,
+    "name": "mongodb",
+    "portMappings": [
+      {
+        "containerPort": 28017,
+        "hostPort": 28017
+      }
+    ]
+  }
+]
+TASK_DEFINITION
+
+  volume {
+    name = "jenkins-home"
+    host_path = "/ecs/jenkins"
+  }
+}
+`
+
 var testAccAWSEcsTaskDefinitionWithScratchVolume = `
 resource "aws_ecs_task_definition" "sleep" {
   family = "terraform-acc-sc-volume-test"
@@ -108,15 +108,15 @@ resource "aws_s3_bucket_object" "default" {
 }

 resource "aws_elastic_beanstalk_application" "default" {
-  name = "tf-test-name"
+  name = "tf-test-name-%d"
   description = "tf-test-desc"
 }

 resource "aws_elastic_beanstalk_application_version" "default" {
-  application = "tf-test-name"
+  application = "tf-test-name-%d"
   name = "tf-test-version-label"
   bucket = "${aws_s3_bucket.default.id}"
   key = "${aws_s3_bucket_object.default.id}"
 }
-`, randInt)
+`, randInt, randInt, randInt)
 }
@@ -388,7 +388,9 @@ func resourceAwsElbRead(d *schema.ResourceData, meta interface{}) error {
     }
   }
   d.Set("subnets", flattenStringList(lb.Subnets))
+  if lbAttrs.ConnectionSettings != nil {
   d.Set("idle_timeout", lbAttrs.ConnectionSettings.IdleTimeout)
+  }
   d.Set("connection_draining", lbAttrs.ConnectionDraining.Enabled)
   d.Set("connection_draining_timeout", lbAttrs.ConnectionDraining.Timeout)
   d.Set("cross_zone_load_balancing", lbAttrs.CrossZoneLoadBalancing.Enabled)
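Editor's note: the guard presumably matters because the load balancer attributes can come back without a ConnectionSettings block, and dereferencing it unconditionally panics. A self-contained sketch of the pattern with stand-in types (illustrative only, not the ELB SDK structs):

package main

import "fmt"

// Stand-in types for the attribute structs returned by the API.
type connectionSettings struct{ IdleTimeout *int64 }
type loadBalancerAttributes struct{ ConnectionSettings *connectionSettings }

// readIdleTimeout only dereferences ConnectionSettings when it is present,
// mirroring the nil guard added in the hunk above.
func readIdleTimeout(attrs *loadBalancerAttributes) (int64, bool) {
	if attrs.ConnectionSettings != nil && attrs.ConnectionSettings.IdleTimeout != nil {
		return *attrs.ConnectionSettings.IdleTimeout, true
	}
	return 0, false
}

func main() {
	if v, ok := readIdleTimeout(&loadBalancerAttributes{}); ok {
		fmt.Println("idle_timeout:", v)
	} else {
		fmt.Println("idle_timeout not returned; skip setting it in state")
	}
}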
@@ -26,7 +26,7 @@ func TestAccAWSELB_basic(t *testing.T) {
     Providers:    testAccProviders,
     CheckDestroy: testAccCheckAWSELBDestroy,
     Steps: []resource.TestStep{
-      resource.TestStep{
+      {
         Config: testAccAWSELBConfig,
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.bar", &conf),

@@ -70,7 +70,7 @@ func TestAccAWSELB_fullCharacterRange(t *testing.T) {
     Providers:    testAccProviders,
     CheckDestroy: testAccCheckAWSELBDestroy,
     Steps: []resource.TestStep{
-      resource.TestStep{
+      {
         Config: fmt.Sprintf(testAccAWSELBFullRangeOfCharacters, lbName),
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.foo", &conf),

@@ -93,14 +93,14 @@ func TestAccAWSELB_AccessLogs_enabled(t *testing.T) {
     Providers:    testAccProviders,
     CheckDestroy: testAccCheckAWSELBDestroy,
     Steps: []resource.TestStep{
-      resource.TestStep{
+      {
         Config: testAccAWSELBAccessLogs,
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.foo", &conf),
         ),
       },

-      resource.TestStep{
+      {
         Config: testAccAWSELBAccessLogsOn(rName),
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.foo", &conf),

@@ -115,7 +115,7 @@ func TestAccAWSELB_AccessLogs_enabled(t *testing.T) {
         ),
       },

-      resource.TestStep{
+      {
         Config: testAccAWSELBAccessLogs,
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.foo", &conf),

@@ -138,14 +138,14 @@ func TestAccAWSELB_AccessLogs_disabled(t *testing.T) {
     Providers:    testAccProviders,
     CheckDestroy: testAccCheckAWSELBDestroy,
     Steps: []resource.TestStep{
-      resource.TestStep{
+      {
         Config: testAccAWSELBAccessLogs,
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.foo", &conf),
         ),
       },

-      resource.TestStep{
+      {
         Config: testAccAWSELBAccessLogsDisabled(rName),
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.foo", &conf),

@@ -160,7 +160,7 @@ func TestAccAWSELB_AccessLogs_disabled(t *testing.T) {
         ),
       },

-      resource.TestStep{
+      {
         Config: testAccAWSELBAccessLogs,
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.foo", &conf),

@@ -182,7 +182,7 @@ func TestAccAWSELB_generatedName(t *testing.T) {
     Providers:    testAccProviders,
     CheckDestroy: testAccCheckAWSELBDestroy,
     Steps: []resource.TestStep{
-      resource.TestStep{
+      {
         Config: testAccAWSELBGeneratedName,
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.foo", &conf),

@@ -203,7 +203,7 @@ func TestAccAWSELB_availabilityZones(t *testing.T) {
     Providers:    testAccProviders,
     CheckDestroy: testAccCheckAWSELBDestroy,
     Steps: []resource.TestStep{
-      resource.TestStep{
+      {
         Config: testAccAWSELBConfig,
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.bar", &conf),

@@ -218,7 +218,7 @@ func TestAccAWSELB_availabilityZones(t *testing.T) {
         ),
       },

-      resource.TestStep{
+      {
         Config: testAccAWSELBConfig_AvailabilityZonesUpdate,
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.bar", &conf),

@@ -244,7 +244,7 @@ func TestAccAWSELB_tags(t *testing.T) {
     Providers:    testAccProviders,
     CheckDestroy: testAccCheckAWSELBDestroy,
     Steps: []resource.TestStep{
-      resource.TestStep{
+      {
         Config: testAccAWSELBConfig,
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.bar", &conf),

@@ -254,7 +254,7 @@ func TestAccAWSELB_tags(t *testing.T) {
         ),
       },

-      resource.TestStep{
+      {
         Config: testAccAWSELBConfig_TagUpdate,
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.bar", &conf),

@@ -285,7 +285,7 @@ func TestAccAWSELB_iam_server_cert(t *testing.T) {
     Providers:    testAccProviders,
     CheckDestroy: testAccCheckAWSELBDestroy,
     Steps: []resource.TestStep{
-      resource.TestStep{
+      {
         Config: testAccELBIAMServerCertConfig(
           fmt.Sprintf("tf-acctest-%s", acctest.RandString(10))),
         Check: resource.ComposeTestCheckFunc(

@@ -306,7 +306,7 @@ func TestAccAWSELB_swap_subnets(t *testing.T) {
     Providers:    testAccProviders,
     CheckDestroy: testAccCheckAWSELBDestroy,
     Steps: []resource.TestStep{
-      resource.TestStep{
+      {
         Config: testAccAWSELBConfig_subnets,
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.ourapp", &conf),

@@ -315,7 +315,7 @@ func TestAccAWSELB_swap_subnets(t *testing.T) {
         ),
       },

-      resource.TestStep{
+      {
         Config: testAccAWSELBConfig_subnet_swap,
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.ourapp", &conf),

@@ -363,7 +363,7 @@ func TestAccAWSELB_InstanceAttaching(t *testing.T) {
     Providers:    testAccProviders,
     CheckDestroy: testAccCheckAWSELBDestroy,
     Steps: []resource.TestStep{
-      resource.TestStep{
+      {
         Config: testAccAWSELBConfig,
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.bar", &conf),

@@ -371,7 +371,7 @@ func TestAccAWSELB_InstanceAttaching(t *testing.T) {
         ),
       },

-      resource.TestStep{
+      {
         Config: testAccAWSELBConfigNewInstance,
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.bar", &conf),

@@ -391,7 +391,7 @@ func TestAccAWSELBUpdate_Listener(t *testing.T) {
     Providers:    testAccProviders,
     CheckDestroy: testAccCheckAWSELBDestroy,
     Steps: []resource.TestStep{
-      resource.TestStep{
+      {
         Config: testAccAWSELBConfig,
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.bar", &conf),

@@ -401,7 +401,7 @@ func TestAccAWSELBUpdate_Listener(t *testing.T) {
         ),
       },

-      resource.TestStep{
+      {
         Config: testAccAWSELBConfigListener_update,
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.bar", &conf),

@@ -422,7 +422,7 @@ func TestAccAWSELB_HealthCheck(t *testing.T) {
     Providers:    testAccProviders,
     CheckDestroy: testAccCheckAWSELBDestroy,
     Steps: []resource.TestStep{
-      resource.TestStep{
+      {
         Config: testAccAWSELBConfigHealthCheck,
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.bar", &conf),

@@ -450,14 +450,14 @@ func TestAccAWSELBUpdate_HealthCheck(t *testing.T) {
     Providers:    testAccProviders,
     CheckDestroy: testAccCheckAWSELBDestroy,
     Steps: []resource.TestStep{
-      resource.TestStep{
+      {
         Config: testAccAWSELBConfigHealthCheck,
         Check: resource.ComposeTestCheckFunc(
           resource.TestCheckResourceAttr(
             "aws_elb.bar", "health_check.0.healthy_threshold", "5"),
         ),
       },
-      resource.TestStep{
+      {
         Config: testAccAWSELBConfigHealthCheck_update,
         Check: resource.ComposeTestCheckFunc(
           resource.TestCheckResourceAttr(

@@ -477,7 +477,7 @@ func TestAccAWSELB_Timeout(t *testing.T) {
     Providers:    testAccProviders,
     CheckDestroy: testAccCheckAWSELBDestroy,
     Steps: []resource.TestStep{
-      resource.TestStep{
+      {
         Config: testAccAWSELBConfigIdleTimeout,
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAWSELBExists("aws_elb.bar", &conf),

@@ -497,7 +497,7 @@ func TestAccAWSELBUpdate_Timeout(t *testing.T) {
     Providers:    testAccProviders,
     CheckDestroy: testAccCheckAWSELBDestroy,
     Steps: []resource.TestStep{
-      resource.TestStep{
+      {
         Config: testAccAWSELBConfigIdleTimeout,
         Check: resource.ComposeTestCheckFunc(
           resource.TestCheckResourceAttr(

@@ -505,7 +505,7 @@ func TestAccAWSELBUpdate_Timeout(t *testing.T) {
           ),
         ),
       },
-      resource.TestStep{
+      {
         Config: testAccAWSELBConfigIdleTimeout_update,
         Check: resource.ComposeTestCheckFunc(
           resource.TestCheckResourceAttr(

@@ -524,7 +524,7 @@ func TestAccAWSELB_ConnectionDraining(t *testing.T) {
     Providers:    testAccProviders,
     CheckDestroy: testAccCheckAWSELBDestroy,
     Steps: []resource.TestStep{
-      resource.TestStep{
+      {
         Config: testAccAWSELBConfigConnectionDraining,
         Check: resource.ComposeTestCheckFunc(
           resource.TestCheckResourceAttr(

@@ -546,7 +546,7 @@ func TestAccAWSELBUpdate_ConnectionDraining(t *testing.T) {
     Providers:    testAccProviders,
     CheckDestroy: testAccCheckAWSELBDestroy,
     Steps: []resource.TestStep{
-      resource.TestStep{
+      {
         Config: testAccAWSELBConfigConnectionDraining,
         Check: resource.ComposeTestCheckFunc(
           resource.TestCheckResourceAttr(

@@ -557,7 +557,7 @@ func TestAccAWSELBUpdate_ConnectionDraining(t *testing.T) {
           ),
         ),
       },
-      resource.TestStep{
+      {
         Config: testAccAWSELBConfigConnectionDraining_update_timeout,
         Check: resource.ComposeTestCheckFunc(
           resource.TestCheckResourceAttr(

@@ -568,7 +568,7 @@ func TestAccAWSELBUpdate_ConnectionDraining(t *testing.T) {
           ),
         ),
       },
-      resource.TestStep{
+      {
         Config: testAccAWSELBConfigConnectionDraining_update_disable,
         Check: resource.ComposeTestCheckFunc(
           resource.TestCheckResourceAttr(

@@ -587,7 +587,7 @@ func TestAccAWSELB_SecurityGroups(t *testing.T) {
     Providers:    testAccProviders,
     CheckDestroy: testAccCheckAWSELBDestroy,
     Steps: []resource.TestStep{
-      resource.TestStep{
+      {
         Config: testAccAWSELBConfig,
         Check: resource.ComposeTestCheckFunc(
           // ELBs get a default security group

@@ -596,7 +596,7 @@ func TestAccAWSELB_SecurityGroups(t *testing.T) {
           ),
         ),
       },
-      resource.TestStep{
+      {
         Config: testAccAWSELBConfigSecurityGroups,
         Check: resource.ComposeTestCheckFunc(
           // Count should still be one as we swap in a custom security group
@@ -0,0 +1,94 @@
+package aws
+
+import (
+  "fmt"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/service/iam"
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsIamAccountAlias() *schema.Resource {
+  return &schema.Resource{
+    Create: resourceAwsIamAccountAliasCreate,
+    Read:   resourceAwsIamAccountAliasRead,
+    Delete: resourceAwsIamAccountAliasDelete,
+
+    Importer: &schema.ResourceImporter{
+      State: schema.ImportStatePassthrough,
+    },
+
+    Schema: map[string]*schema.Schema{
+      "account_alias": {
+        Type:         schema.TypeString,
+        Required:     true,
+        ForceNew:     true,
+        ValidateFunc: validateAccountAlias,
+      },
+    },
+  }
+}
+
+func resourceAwsIamAccountAliasCreate(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).iamconn
+
+  account_alias := d.Get("account_alias").(string)
+
+  params := &iam.CreateAccountAliasInput{
+    AccountAlias: aws.String(account_alias),
+  }
+
+  _, err := conn.CreateAccountAlias(params)
+
+  if err != nil {
+    return fmt.Errorf("Error creating account alias with name %s", account_alias)
+  }
+
+  d.SetId(account_alias)
+
+  return nil
+}
+
+func resourceAwsIamAccountAliasRead(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).iamconn
+
+  params := &iam.ListAccountAliasesInput{}
+
+  resp, err := conn.ListAccountAliases(params)
+
+  if err != nil {
+    return err
+  }
+
+  if resp == nil || len(resp.AccountAliases) == 0 {
+    d.SetId("")
+    return nil
+  }
+
+  account_alias := aws.StringValue(resp.AccountAliases[0])
+
+  d.SetId(account_alias)
+  d.Set("account_alias", account_alias)
+
+  return nil
+}
+
+func resourceAwsIamAccountAliasDelete(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).iamconn
+
+  account_alias := d.Get("account_alias").(string)
+
+  params := &iam.DeleteAccountAliasInput{
+    AccountAlias: aws.String(account_alias),
+  }
+
+  _, err := conn.DeleteAccountAlias(params)
+
+  if err != nil {
+    return fmt.Errorf("Error deleting account alias with name %s", account_alias)
+  }
+
+  d.SetId("")
+
+  return nil
+}
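Editor's note: the Read function above mirrors the first (and, in practice, only) alias returned by ListAccountAliases back into state. For reference, a hedged stand-alone sketch of the same lookup with the AWS SDK (credentials and region assumed to come from the environment; not part of the resource itself):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	// Sketch only: list the account aliases and print the first one,
	// which is what the resource treats as its ID.
	sess := session.Must(session.NewSession())
	conn := iam.New(sess)

	resp, err := conn.ListAccountAliases(&iam.ListAccountAliasesInput{})
	if err != nil {
		log.Fatal(err)
	}
	if len(resp.AccountAliases) == 0 {
		fmt.Println("no account alias set")
		return
	}
	fmt.Println("account alias:", aws.StringValue(resp.AccountAliases[0]))
}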
@@ -0,0 +1,91 @@
+package aws
+
+import (
+  "fmt"
+  "testing"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/service/iam"
+  "github.com/hashicorp/terraform/helper/acctest"
+  "github.com/hashicorp/terraform/helper/resource"
+  "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSIAMAccountAlias_basic(t *testing.T) {
+  var account_alias string
+
+  rstring := acctest.RandString(5)
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckAWSIAMAccountAliasDestroy,
+    Steps: []resource.TestStep{
+      {
+        Config: testAccAWSIAMAccountAliasConfig(rstring),
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckAWSIAMAccountAliasExists("aws_iam_account_alias.test", &account_alias),
+        ),
+      },
+    },
+  })
+}
+
+func testAccCheckAWSIAMAccountAliasDestroy(s *terraform.State) error {
+  conn := testAccProvider.Meta().(*AWSClient).iamconn
+
+  for _, rs := range s.RootModule().Resources {
+    if rs.Type != "aws_iam_account_alias" {
+      continue
+    }
+
+    params := &iam.ListAccountAliasesInput{}
+
+    resp, err := conn.ListAccountAliases(params)
+
+    if err != nil || resp == nil {
+      return nil
+    }
+
+    if len(resp.AccountAliases) > 0 {
+      return fmt.Errorf("Bad: Account alias still exists: %q", rs.Primary.ID)
+    }
+  }
+
+  return nil
+
+}
+
+func testAccCheckAWSIAMAccountAliasExists(n string, a *string) resource.TestCheckFunc {
+  return func(s *terraform.State) error {
+    rs, ok := s.RootModule().Resources[n]
+    if !ok {
+      return fmt.Errorf("Not found: %s", n)
+    }
+
+    conn := testAccProvider.Meta().(*AWSClient).iamconn
+    params := &iam.ListAccountAliasesInput{}
+
+    resp, err := conn.ListAccountAliases(params)
+
+    if err != nil || resp == nil {
+      return nil
+    }
+
+    if len(resp.AccountAliases) == 0 {
+      return fmt.Errorf("Bad: Account alias %q does not exist", rs.Primary.ID)
+    }
+
+    *a = aws.StringValue(resp.AccountAliases[0])
+
+    return nil
+  }
+}
+
+func testAccAWSIAMAccountAliasConfig(rstring string) string {
+  return fmt.Sprintf(`
+resource "aws_iam_account_alias" "test" {
+  account_alias = "terraform-%s-alias"
+}
+`, rstring)
+}
@@ -2,10 +2,12 @@ package aws

 import (
   "fmt"
+  "log"
   "regexp"
   "time"

   "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/aws/awserr"
   "github.com/aws/aws-sdk-go/service/iam"

   "github.com/hashicorp/terraform/helper/schema"

@@ -70,6 +72,11 @@ func resourceAwsIamSamlProviderRead(d *schema.ResourceData, meta interface{}) er
   }
   out, err := iamconn.GetSAMLProvider(input)
   if err != nil {
+    if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" {
+      log.Printf("[WARN] IAM SAML Provider %q not found.", d.Id())
+      d.SetId("")
+      return nil
+    }
     return err
   }
@@ -548,7 +548,7 @@ func resourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error {
   }

   if err := d.Set("ipv6_addresses", ipv6Addresses); err != nil {
-    log.Printf("[WARN] Error setting ipv6_addresses for AWS Instance (%d): %s", d.Id(), err)
+    log.Printf("[WARN] Error setting ipv6_addresses for AWS Instance (%s): %s", d.Id(), err)
   }

   d.Set("ebs_optimized", instance.EbsOptimized)

@@ -1034,11 +1034,16 @@ func readBlockDeviceMappingsFromConfig(

       if v, ok := bd["volume_type"].(string); ok && v != "" {
         ebs.VolumeType = aws.String(v)
-      }
+        if "io1" == strings.ToLower(v) {
+          // Condition: This parameter is required for requests to create io1
+          // volumes; it is not used in requests to create gp2, st1, sc1, or
+          // standard volumes.
+          // See: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html
         if v, ok := bd["iops"].(int); ok && v > 0 {
           ebs.Iops = aws.Int64(int64(v))
         }
+        }
+      }

       blockDevices = append(blockDevices, &ec2.BlockDeviceMapping{
         DeviceName: aws.String(bd["device_name"].(string)),
@@ -1060,7 +1060,6 @@ resource "aws_instance" "foo" {
   root_block_device {
     volume_type = "gp2"
     volume_size = 11
-    iops = 330
   }
 }
 `
@@ -7,12 +7,14 @@ import (

   "github.com/aws/aws-sdk-go/aws"
   "github.com/aws/aws-sdk-go/service/lambda"
+  "github.com/hashicorp/terraform/helper/acctest"
   "github.com/hashicorp/terraform/helper/resource"
   "github.com/hashicorp/terraform/terraform"
 )

 func TestAccAWSLambdaAlias_basic(t *testing.T) {
   var conf lambda.AliasConfiguration
+  rInt := acctest.RandInt()

   resource.Test(t, resource.TestCase{
     PreCheck: func() { testAccPreCheck(t) },

@@ -20,7 +22,7 @@ func TestAccAWSLambdaAlias_basic(t *testing.T) {
     CheckDestroy: testAccCheckAwsLambdaAliasDestroy,
     Steps: []resource.TestStep{
       resource.TestStep{
-        Config: testAccAwsLambdaAliasConfig,
+        Config: testAccAwsLambdaAliasConfig(rInt),
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAwsLambdaAliasExists("aws_lambda_alias.lambda_alias_test", &conf),
           testAccCheckAwsLambdaAttributes(&conf),

@@ -95,9 +97,10 @@ func testAccCheckAwsLambdaAttributes(mapping *lambda.AliasConfiguration) resourc
     }
   }
 }

-const testAccAwsLambdaAliasConfig = `
+func testAccAwsLambdaAliasConfig(rInt int) string {
+  return fmt.Sprintf(`
 resource "aws_iam_role" "iam_for_lambda" {
-    name = "iam_for_lambda"
+    name = "iam_for_lambda_%d"

     assume_role_policy = <<EOF
 {

@@ -117,7 +120,7 @@ EOF
 }

 resource "aws_iam_policy" "policy_for_role" {
-    name = "policy_for_role"
+    name = "policy_for_role_%d"
     path = "/"
     description = "IAM policy for for Lamda alias testing"

@@ -138,7 +141,7 @@ EOF
 }

 resource "aws_iam_policy_attachment" "policy_attachment_for_role" {
-    name = "policy_attachment_for_role"
+    name = "policy_attachment_for_role_%d"
     roles = ["${aws_iam_role.iam_for_lambda.name}"]
     policy_arn = "${aws_iam_policy.policy_for_role.arn}"
 }

@@ -156,5 +159,5 @@ resource "aws_lambda_alias" "lambda_alias_test" {
     description = "a sample description"
     function_name = "${aws_lambda_function.lambda_function_test_create.arn}"
     function_version = "$LATEST"
+}`, rInt, rInt, rInt)
 }
-`
@@ -389,9 +389,9 @@ func resourceAwsLambdaFunctionRead(d *schema.ResourceData, meta interface{}) err
       last := p.Versions[len(p.Versions)-1]
       lastVersion = *last.Version
       lastQualifiedArn = *last.FunctionArn
-      return true
-    }
       return false
+    }
+    return true
   })
   if err != nil {
     return err

@@ -416,6 +416,7 @@ func listVersionsByFunctionPages(c *lambda.Lambda, input *lambda.ListVersionsByF
     if !shouldContinue || lastPage {
       break
     }
+    input.Marker = page.NextMarker
   }
   return nil
 }
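Editor's note: the added input.Marker assignment is the heart of the fix; without it the pager never advances past the first page, so functions with many versions never report their latest version or qualified ARN. A minimal sketch of the marker-driven pager (renamed here to make clear it is an illustration, not the provider's function):

package aws

import "github.com/aws/aws-sdk-go/service/lambda"

// listVersionsByFunctionPagesSketch walks all pages of ListVersionsByFunction.
// fn is invoked once per page and returns false to stop early; the marker must
// be advanced before the next request or the same page is fetched repeatedly.
func listVersionsByFunctionPagesSketch(c *lambda.Lambda, input *lambda.ListVersionsByFunctionInput,
	fn func(p *lambda.ListVersionsByFunctionOutput, lastPage bool) bool) error {
	for {
		page, err := c.ListVersionsByFunction(input)
		if err != nil {
			return err
		}
		lastPage := page.NextMarker == nil

		shouldContinue := fn(page, lastPage)
		if !shouldContinue || lastPage {
			break
		}
		// Advance to the next page of versions.
		input.Marker = page.NextMarker
	}
	return nil
}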
@@ -292,6 +292,8 @@ func TestAccAWSLambdaFunction_localUpdate(t *testing.T) {
   }
   defer os.Remove(path)

+  rInt := acctest.RandInt()
+
   resource.Test(t, resource.TestCase{
     PreCheck:  func() { testAccPreCheck(t) },
     Providers: testAccProviders,

@@ -301,7 +303,7 @@ func TestAccAWSLambdaFunction_localUpdate(t *testing.T) {
         PreConfig: func() {
           testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func.js": "lambda.js"}, zipFile)
         },
-        Config: genAWSLambdaFunctionConfig_local(path),
+        Config: genAWSLambdaFunctionConfig_local(path, rInt),
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_local", "tf_acc_lambda_name_local", &conf),
           testAccCheckAwsLambdaFunctionName(&conf, "tf_acc_lambda_name_local"),

@@ -313,7 +315,7 @@ func TestAccAWSLambdaFunction_localUpdate(t *testing.T) {
         PreConfig: func() {
           testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func_modified.js": "lambda.js"}, zipFile)
         },
-        Config: genAWSLambdaFunctionConfig_local(path),
+        Config: genAWSLambdaFunctionConfig_local(path, rInt),
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_local", "tf_acc_lambda_name_local", &conf),
           testAccCheckAwsLambdaFunctionName(&conf, "tf_acc_lambda_name_local"),

@@ -387,6 +389,8 @@ func TestAccAWSLambdaFunction_s3Update(t *testing.T) {
   bucketName := fmt.Sprintf("tf-acc-lambda-s3-deployments-%d", randomInteger)
   key := "lambda-func.zip"

+  rInt := acctest.RandInt()
+
   resource.Test(t, resource.TestCase{
     PreCheck:  func() { testAccPreCheck(t) },
     Providers: testAccProviders,

@@ -397,7 +401,7 @@ func TestAccAWSLambdaFunction_s3Update(t *testing.T) {
           // Upload 1st version
           testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func.js": "lambda.js"}, zipFile)
         },
-        Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path),
+        Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path, rInt),
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_s3", "tf_acc_lambda_name_s3", &conf),
           testAccCheckAwsLambdaFunctionName(&conf, "tf_acc_lambda_name_s3"),

@@ -411,12 +415,12 @@ func TestAccAWSLambdaFunction_s3Update(t *testing.T) {
           // Upload 2nd version
           testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func_modified.js": "lambda.js"}, zipFile)
         },
-        Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path),
+        Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path, rInt),
       },
       // Extra step because of missing ComputedWhen
       // See https://github.com/hashicorp/terraform/pull/4846 & https://github.com/hashicorp/terraform/pull/5330
       {
-        Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path),
+        Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path, rInt),
         Check: resource.ComposeTestCheckFunc(
           testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_s3", "tf_acc_lambda_name_s3", &conf),
           testAccCheckAwsLambdaFunctionName(&conf, "tf_acc_lambda_name_s3"),

@@ -1101,7 +1105,7 @@ resource "aws_lambda_function" "lambda_function_test" {

 const testAccAWSLambdaFunctionConfig_local_tpl = `
 resource "aws_iam_role" "iam_for_lambda" {
-    name = "iam_for_lambda"
+    name = "iam_for_lambda_%d"
     assume_role_policy = <<EOF
 {
   "Version": "2012-10-17",

@@ -1128,8 +1132,8 @@ resource "aws_lambda_function" "lambda_function_local" {
 }
 `

-func genAWSLambdaFunctionConfig_local(filePath string) string {
-  return fmt.Sprintf(testAccAWSLambdaFunctionConfig_local_tpl,
+func genAWSLambdaFunctionConfig_local(filePath string, rInt int) string {
+  return fmt.Sprintf(testAccAWSLambdaFunctionConfig_local_tpl, rInt,
     filePath, filePath)
 }

@@ -1182,7 +1186,7 @@ resource "aws_s3_bucket_object" "o" {
     etag = "${md5(file("%s"))}"
 }
 resource "aws_iam_role" "iam_for_lambda" {
-    name = "iam_for_lambda"
+    name = "iam_for_lambda_%d"
     assume_role_policy = <<EOF
 {
   "Version": "2012-10-17",

@@ -1210,9 +1214,9 @@ resource "aws_lambda_function" "lambda_function_s3" {
 }
 `

-func genAWSLambdaFunctionConfig_s3(bucket, key, path string) string {
+func genAWSLambdaFunctionConfig_s3(bucket, key, path string, rInt int) string {
   return fmt.Sprintf(testAccAWSLambdaFunctionConfig_s3_tpl,
-    bucket, key, path, path)
+    bucket, key, path, path, rInt)
 }

 func testAccAWSLambdaFunctionConfig_s3_unversioned_tpl(rName, bucketName, key, path string) string {
@@ -28,20 +28,20 @@ func resourceAwsNetworkAcl() *schema.Resource {
     },

     Schema: map[string]*schema.Schema{
-      "vpc_id": &schema.Schema{
+      "vpc_id": {
         Type:     schema.TypeString,
         Required: true,
         ForceNew: true,
         Computed: false,
       },
-      "subnet_id": &schema.Schema{
+      "subnet_id": {
         Type:       schema.TypeString,
         Optional:   true,
         ForceNew:   true,
         Computed:   false,
         Deprecated: "Attribute subnet_id is deprecated on network_acl resources. Use subnet_ids instead",
       },
-      "subnet_ids": &schema.Schema{
+      "subnet_ids": {
         Type:     schema.TypeSet,
         Optional: true,
         Computed: true,

@@ -49,42 +49,46 @@ func resourceAwsNetworkAcl() *schema.Resource {
         Elem: &schema.Schema{Type: schema.TypeString},
         Set:  schema.HashString,
       },
-      "ingress": &schema.Schema{
+      "ingress": {
         Type:     schema.TypeSet,
         Required: false,
         Optional: true,
         Computed: true,
         Elem: &schema.Resource{
           Schema: map[string]*schema.Schema{
-            "from_port": &schema.Schema{
+            "from_port": {
              Type:     schema.TypeInt,
              Required: true,
            },
-            "to_port": &schema.Schema{
+            "to_port": {
              Type:     schema.TypeInt,
              Required: true,
            },
-            "rule_no": &schema.Schema{
+            "rule_no": {
              Type:     schema.TypeInt,
              Required: true,
            },
-            "action": &schema.Schema{
+            "action": {
              Type:     schema.TypeString,
              Required: true,
            },
-            "protocol": &schema.Schema{
+            "protocol": {
              Type:     schema.TypeString,
              Required: true,
            },
-            "cidr_block": &schema.Schema{
+            "cidr_block": {
              Type:     schema.TypeString,
              Optional: true,
            },
-            "icmp_type": &schema.Schema{
+            "ipv6_cidr_block": {
+             Type:     schema.TypeString,
+             Optional: true,
+            },
+            "icmp_type": {
              Type:     schema.TypeInt,
              Optional: true,
            },
-            "icmp_code": &schema.Schema{
+            "icmp_code": {
              Type:     schema.TypeInt,
              Optional: true,
            },

@@ -92,42 +96,46 @@ func resourceAwsNetworkAcl() *schema.Resource {
         },
         Set: resourceAwsNetworkAclEntryHash,
       },
-      "egress": &schema.Schema{
+      "egress": {
         Type:     schema.TypeSet,
         Required: false,
         Optional: true,
         Computed: true,
         Elem: &schema.Resource{
           Schema: map[string]*schema.Schema{
-            "from_port": &schema.Schema{
+            "from_port": {
              Type:     schema.TypeInt,
              Required: true,
            },
-            "to_port": &schema.Schema{
+            "to_port": {
              Type:     schema.TypeInt,
              Required: true,
            },
-            "rule_no": &schema.Schema{
+            "rule_no": {
              Type:     schema.TypeInt,
              Required: true,
            },
-            "action": &schema.Schema{
+            "action": {
              Type:     schema.TypeString,
              Required: true,
            },
-            "protocol": &schema.Schema{
+            "protocol": {
              Type:     schema.TypeString,
              Required: true,
            },
-            "cidr_block": &schema.Schema{
+            "cidr_block": {
              Type:     schema.TypeString,
              Optional: true,
            },
-            "icmp_type": &schema.Schema{
+            "ipv6_cidr_block": {
+             Type:     schema.TypeString,
+             Optional: true,
+            },
+            "icmp_type": {
              Type:     schema.TypeInt,
              Optional: true,
            },
-            "icmp_code": &schema.Schema{
+            "icmp_code": {
              Type:     schema.TypeInt,
              Optional: true,
            },

@@ -389,6 +397,7 @@ func updateNetworkAclEntries(d *schema.ResourceData, entryType string, conn *ec2
       }
     }

+    if add.CidrBlock != nil {
     // AWS mutates the CIDR block into a network implied by the IP and
     // mask provided. This results in hashing inconsistencies between
     // the local config file and the state returned by the API. Error

@@ -396,18 +405,28 @@ func updateNetworkAclEntries(d *schema.ResourceData, entryType string, conn *ec2
     if err := validateCIDRBlock(*add.CidrBlock); err != nil {
       return err
     }
+    }

-    // Add new Acl entry
-    _, connErr := conn.CreateNetworkAclEntry(&ec2.CreateNetworkAclEntryInput{
+    createOpts := &ec2.CreateNetworkAclEntryInput{
       NetworkAclId: aws.String(d.Id()),
-      CidrBlock:    add.CidrBlock,
       Egress:       add.Egress,
       PortRange:    add.PortRange,
       Protocol:     add.Protocol,
       RuleAction:   add.RuleAction,
       RuleNumber:   add.RuleNumber,
       IcmpTypeCode: add.IcmpTypeCode,
-    })
+    }
+
+    if add.CidrBlock != nil {
+      createOpts.CidrBlock = add.CidrBlock
+    }
+
+    if add.Ipv6CidrBlock != nil {
+      createOpts.Ipv6CidrBlock = add.Ipv6CidrBlock
+    }
+
+    // Add new Acl entry
+    _, connErr := conn.CreateNetworkAclEntry(createOpts)
     if connErr != nil {
       return fmt.Errorf("Error creating %s entry: %s", entryType, connErr)
     }

@@ -520,7 +539,13 @@ func resourceAwsNetworkAclEntryHash(v interface{}) int {
     buf.WriteString(fmt.Sprintf("%s-", protocol))
   }

-  buf.WriteString(fmt.Sprintf("%s-", m["cidr_block"].(string)))
+  if v, ok := m["cidr_block"]; ok {
+    buf.WriteString(fmt.Sprintf("%s-", v.(string)))
+  }
+
+  if v, ok := m["ipv6_cidr_block"]; ok {
+    buf.WriteString(fmt.Sprintf("%s-", v.(string)))
+  }

   if v, ok := m["ssl_certificate_id"]; ok {
     buf.WriteString(fmt.Sprintf("%s-", v.(string)))

@@ -539,11 +564,11 @@ func resourceAwsNetworkAclEntryHash(v interface{}) int {
 func getDefaultNetworkAcl(vpc_id string, conn *ec2.EC2) (defaultAcl *ec2.NetworkAcl, err error) {
   resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{
     Filters: []*ec2.Filter{
-      &ec2.Filter{
+      {
         Name:   aws.String("default"),
         Values: []*string{aws.String("true")},
       },
-      &ec2.Filter{
+      {
         Name:   aws.String("vpc-id"),
         Values: []*string{aws.String(vpc_id)},
       },

@@ -559,7 +584,7 @@ func getDefaultNetworkAcl(vpc_id string, conn *ec2.EC2) (defaultAcl *ec2.Network
 func findNetworkAclAssociation(subnetId string, conn *ec2.EC2) (networkAclAssociation *ec2.NetworkAclAssociation, err error) {
   resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{
     Filters: []*ec2.Filter{
-      &ec2.Filter{
+      {
         Name:   aws.String("association.subnet-id"),
         Values: []*string{aws.String(subnetId)},
       },

@@ -587,8 +612,12 @@ func networkAclEntriesToMapList(networkAcls []*ec2.NetworkAclEntry) []map[string
     acl := make(map[string]interface{})
     acl["rule_no"] = *entry.RuleNumber
     acl["action"] = *entry.RuleAction
+    if entry.CidrBlock != nil {
     acl["cidr_block"] = *entry.CidrBlock
+    }
+    if entry.Ipv6CidrBlock != nil {
+      acl["ipv6_cidr_block"] = *entry.Ipv6CidrBlock
+    }
     // The AWS network ACL API only speaks protocol numbers, and
     // that's all we record.
     if _, err := strconv.Atoi(*entry.Protocol); err != nil {
@@ -21,54 +21,59 @@ func resourceAwsNetworkAclRule() *schema.Resource {
    Delete: resourceAwsNetworkAclRuleDelete,

    Schema: map[string]*schema.Schema{
-      "network_acl_id": &schema.Schema{
+      "network_acl_id": {
        Type:     schema.TypeString,
        Required: true,
        ForceNew: true,
      },
-      "rule_number": &schema.Schema{
+      "rule_number": {
        Type:     schema.TypeInt,
        Required: true,
        ForceNew: true,
      },
-      "egress": &schema.Schema{
+      "egress": {
        Type:     schema.TypeBool,
        Optional: true,
        ForceNew: true,
        Default:  false,
      },
-      "protocol": &schema.Schema{
+      "protocol": {
        Type:     schema.TypeString,
        Required: true,
        ForceNew: true,
      },
-      "rule_action": &schema.Schema{
+      "rule_action": {
        Type:     schema.TypeString,
        Required: true,
        ForceNew: true,
      },
-      "cidr_block": &schema.Schema{
+      "cidr_block": {
        Type:     schema.TypeString,
-        Required: true,
+        Optional: true,
        ForceNew: true,
      },
-      "from_port": &schema.Schema{
+      "ipv6_cidr_block": {
+        Type:     schema.TypeString,
+        Optional: true,
+        ForceNew: true,
+      },
+      "from_port": {
        Type:     schema.TypeInt,
        Optional: true,
        ForceNew: true,
      },
-      "to_port": &schema.Schema{
+      "to_port": {
        Type:     schema.TypeInt,
        Optional: true,
        ForceNew: true,
      },
-      "icmp_type": &schema.Schema{
+      "icmp_type": {
        Type:         schema.TypeString,
        Optional:     true,
        ForceNew:     true,
        ValidateFunc: validateICMPArgumentValue,
      },
-      "icmp_code": &schema.Schema{
+      "icmp_code": {
        Type:     schema.TypeString,
        Optional: true,
        ForceNew: true,

@@ -97,7 +102,6 @@ func resourceAwsNetworkAclRuleCreate(d *schema.ResourceData, meta interface{}) e
    Egress:     aws.Bool(d.Get("egress").(bool)),
    RuleNumber: aws.Int64(int64(d.Get("rule_number").(int))),
    Protocol:   aws.String(strconv.Itoa(p)),
-    CidrBlock:  aws.String(d.Get("cidr_block").(string)),
    RuleAction: aws.String(d.Get("rule_action").(string)),
    PortRange: &ec2.PortRange{
      From: aws.Int64(int64(d.Get("from_port").(int))),

@@ -105,6 +109,14 @@ func resourceAwsNetworkAclRuleCreate(d *schema.ResourceData, meta interface{}) e
    },
  }

+  if v, ok := d.GetOk("cidr_block"); ok {
+    params.CidrBlock = aws.String(v.(string))
+  }
+
+  if v, ok := d.GetOk("ipv6_cidr_block"); ok {
+    params.Ipv6CidrBlock = aws.String(v.(string))
+  }
+
  // Specify additional required fields for ICMP. For the list
  // of ICMP codes and types, see: http://www.nthelp.com/icmp.html
  if p == 1 {

@@ -160,6 +172,7 @@ func resourceAwsNetworkAclRuleRead(d *schema.ResourceData, meta interface{}) err

  d.Set("rule_number", resp.RuleNumber)
  d.Set("cidr_block", resp.CidrBlock)
+  d.Set("ipv6_cidr_block", resp.Ipv6CidrBlock)
  d.Set("egress", resp.Egress)
  if resp.IcmpTypeCode != nil {
    d.Set("icmp_code", resp.IcmpTypeCode.Code)

@@ -20,7 +20,7 @@ func TestAccAWSNetworkAclRule_basic(t *testing.T) {
    Providers:    testAccProviders,
    CheckDestroy: testAccCheckAWSNetworkAclRuleDestroy,
    Steps: []resource.TestStep{
-      resource.TestStep{
+      {
        Config: testAccAWSNetworkAclRuleBasicConfig,
        Check: resource.ComposeTestCheckFunc(
          testAccCheckAWSNetworkAclRuleExists("aws_network_acl_rule.baz", &networkAcl),

@@ -32,6 +32,24 @@ func TestAccAWSNetworkAclRule_basic(t *testing.T) {
  })
}

+func TestAccAWSNetworkAclRule_ipv6(t *testing.T) {
+  var networkAcl ec2.NetworkAcl
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckAWSNetworkAclRuleDestroy,
+    Steps: []resource.TestStep{
+      {
+        Config: testAccAWSNetworkAclRuleIpv6Config,
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckAWSNetworkAclRuleExists("aws_network_acl_rule.baz", &networkAcl),
+        ),
+      },
+    },
+  })
+}
+
func TestResourceAWSNetworkAclRule_validateICMPArgumentValue(t *testing.T) {
  type testCases struct {
    Value string

@@ -195,3 +213,23 @@ resource "aws_network_acl_rule" "wibble" {
  icmp_code = -1
}
`
+
+const testAccAWSNetworkAclRuleIpv6Config = `
+resource "aws_vpc" "foo" {
+  cidr_block = "10.3.0.0/16"
+}
+resource "aws_network_acl" "bar" {
+  vpc_id = "${aws_vpc.foo.id}"
+}
+resource "aws_network_acl_rule" "baz" {
+  network_acl_id = "${aws_network_acl.bar.id}"
+  rule_number = 150
+  egress = false
+  protocol = "tcp"
+  rule_action = "allow"
+  ipv6_cidr_block = "::/0"
+  from_port = 22
+  to_port = 22
+}
+
+`

@@ -20,34 +20,34 @@ func TestAccAWSNetworkAcl_EgressAndIngressRules(t *testing.T) {
    Providers:    testAccProviders,
    CheckDestroy: testAccCheckAWSNetworkAclDestroy,
    Steps: []resource.TestStep{
-      resource.TestStep{
+      {
        Config: testAccAWSNetworkAclEgressNIngressConfig,
-        Check: resource.ComposeTestCheckFunc(
+        Check: resource.ComposeAggregateTestCheckFunc(
          testAccCheckAWSNetworkAclExists("aws_network_acl.bar", &networkAcl),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.bar", "ingress.109047673.protocol", "6"),
+            "aws_network_acl.bar", "ingress.1871939009.protocol", "6"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.bar", "ingress.109047673.rule_no", "1"),
+            "aws_network_acl.bar", "ingress.1871939009.rule_no", "1"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.bar", "ingress.109047673.from_port", "80"),
+            "aws_network_acl.bar", "ingress.1871939009.from_port", "80"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.bar", "ingress.109047673.to_port", "80"),
+            "aws_network_acl.bar", "ingress.1871939009.to_port", "80"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.bar", "ingress.109047673.action", "allow"),
+            "aws_network_acl.bar", "ingress.1871939009.action", "allow"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.bar", "ingress.109047673.cidr_block", "10.3.0.0/18"),
+            "aws_network_acl.bar", "ingress.1871939009.cidr_block", "10.3.0.0/18"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.bar", "egress.868403673.protocol", "6"),
+            "aws_network_acl.bar", "egress.3111164687.protocol", "6"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.bar", "egress.868403673.rule_no", "2"),
+            "aws_network_acl.bar", "egress.3111164687.rule_no", "2"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.bar", "egress.868403673.from_port", "443"),
+            "aws_network_acl.bar", "egress.3111164687.from_port", "443"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.bar", "egress.868403673.to_port", "443"),
+            "aws_network_acl.bar", "egress.3111164687.to_port", "443"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.bar", "egress.868403673.cidr_block", "10.3.0.0/18"),
+            "aws_network_acl.bar", "egress.3111164687.cidr_block", "10.3.0.0/18"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.bar", "egress.868403673.action", "allow"),
+            "aws_network_acl.bar", "egress.3111164687.action", "allow"),
        ),
      },
    },

@@ -63,23 +63,22 @@ func TestAccAWSNetworkAcl_OnlyIngressRules_basic(t *testing.T) {
    Providers:    testAccProviders,
    CheckDestroy: testAccCheckAWSNetworkAclDestroy,
    Steps: []resource.TestStep{
-      resource.TestStep{
+      {
        Config: testAccAWSNetworkAclIngressConfig,
-        Check: resource.ComposeTestCheckFunc(
+        Check: resource.ComposeAggregateTestCheckFunc(
          testAccCheckAWSNetworkAclExists("aws_network_acl.foos", &networkAcl),
-          // testAccCheckSubnetAssociation("aws_network_acl.foos", "aws_subnet.blob"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.foos", "ingress.1451312565.protocol", "6"),
+            "aws_network_acl.foos", "ingress.4245812720.protocol", "6"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.foos", "ingress.1451312565.rule_no", "2"),
+            "aws_network_acl.foos", "ingress.4245812720.rule_no", "2"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.foos", "ingress.1451312565.from_port", "443"),
+            "aws_network_acl.foos", "ingress.4245812720.from_port", "443"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.foos", "ingress.1451312565.to_port", "443"),
+            "aws_network_acl.foos", "ingress.4245812720.to_port", "443"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.foos", "ingress.1451312565.action", "deny"),
+            "aws_network_acl.foos", "ingress.4245812720.action", "deny"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.foos", "ingress.1451312565.cidr_block", "10.2.0.0/18"),
+            "aws_network_acl.foos", "ingress.4245812720.cidr_block", "10.2.0.0/18"),
        ),
      },
    },

@@ -95,46 +94,46 @@ func TestAccAWSNetworkAcl_OnlyIngressRules_update(t *testing.T) {
    Providers:    testAccProviders,
    CheckDestroy: testAccCheckAWSNetworkAclDestroy,
    Steps: []resource.TestStep{
-      resource.TestStep{
+      {
        Config: testAccAWSNetworkAclIngressConfig,
-        Check: resource.ComposeTestCheckFunc(
+        Check: resource.ComposeAggregateTestCheckFunc(
          testAccCheckAWSNetworkAclExists("aws_network_acl.foos", &networkAcl),
          testIngressRuleLength(&networkAcl, 2),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.foos", "ingress.2048097841.protocol", "6"),
+            "aws_network_acl.foos", "ingress.401088754.protocol", "6"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.foos", "ingress.2048097841.rule_no", "1"),
+            "aws_network_acl.foos", "ingress.401088754.rule_no", "1"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.foos", "ingress.2048097841.from_port", "0"),
+            "aws_network_acl.foos", "ingress.401088754.from_port", "0"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.foos", "ingress.2048097841.to_port", "22"),
+            "aws_network_acl.foos", "ingress.401088754.to_port", "22"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.foos", "ingress.2048097841.action", "deny"),
+            "aws_network_acl.foos", "ingress.401088754.action", "deny"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.foos", "ingress.1451312565.cidr_block", "10.2.0.0/18"),
+            "aws_network_acl.foos", "ingress.4245812720.cidr_block", "10.2.0.0/18"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.foos", "ingress.1451312565.from_port", "443"),
+            "aws_network_acl.foos", "ingress.4245812720.from_port", "443"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.foos", "ingress.1451312565.rule_no", "2"),
+            "aws_network_acl.foos", "ingress.4245812720.rule_no", "2"),
        ),
      },
-      resource.TestStep{
+      {
        Config: testAccAWSNetworkAclIngressConfigChange,
-        Check: resource.ComposeTestCheckFunc(
+        Check: resource.ComposeAggregateTestCheckFunc(
          testAccCheckAWSNetworkAclExists("aws_network_acl.foos", &networkAcl),
          testIngressRuleLength(&networkAcl, 1),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.foos", "ingress.2048097841.protocol", "6"),
+            "aws_network_acl.foos", "ingress.401088754.protocol", "6"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.foos", "ingress.2048097841.rule_no", "1"),
+            "aws_network_acl.foos", "ingress.401088754.rule_no", "1"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.foos", "ingress.2048097841.from_port", "0"),
+            "aws_network_acl.foos", "ingress.401088754.from_port", "0"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.foos", "ingress.2048097841.to_port", "22"),
+            "aws_network_acl.foos", "ingress.401088754.to_port", "22"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.foos", "ingress.2048097841.action", "deny"),
+            "aws_network_acl.foos", "ingress.401088754.action", "deny"),
          resource.TestCheckResourceAttr(
-            "aws_network_acl.foos", "ingress.2048097841.cidr_block", "10.2.0.0/18"),
+            "aws_network_acl.foos", "ingress.401088754.cidr_block", "10.2.0.0/18"),
        ),
      },
    },

@@ -150,7 +149,7 @@ func TestAccAWSNetworkAcl_OnlyEgressRules(t *testing.T) {
    Providers:    testAccProviders,
    CheckDestroy: testAccCheckAWSNetworkAclDestroy,
    Steps: []resource.TestStep{
-      resource.TestStep{
+      {
        Config: testAccAWSNetworkAclEgressConfig,
        Check: resource.ComposeTestCheckFunc(
          testAccCheckAWSNetworkAclExists("aws_network_acl.bond", &networkAcl),

@@ -169,13 +168,13 @@ func TestAccAWSNetworkAcl_SubnetChange(t *testing.T) {
    Providers:    testAccProviders,
    CheckDestroy: testAccCheckAWSNetworkAclDestroy,
    Steps: []resource.TestStep{
-      resource.TestStep{
+      {
        Config: testAccAWSNetworkAclSubnetConfig,
        Check: resource.ComposeTestCheckFunc(
          testAccCheckSubnetIsAssociatedWithAcl("aws_network_acl.bar", "aws_subnet.old"),
        ),
      },
-      resource.TestStep{
+      {
        Config: testAccAWSNetworkAclSubnetConfigChange,
        Check: resource.ComposeTestCheckFunc(
          testAccCheckSubnetIsNotAssociatedWithAcl("aws_network_acl.bar", "aws_subnet.old"),

@@ -206,7 +205,7 @@ func TestAccAWSNetworkAcl_Subnets(t *testing.T) {
    Providers:    testAccProviders,
    CheckDestroy: testAccCheckAWSNetworkAclDestroy,
    Steps: []resource.TestStep{
-      resource.TestStep{
+      {
        Config: testAccAWSNetworkAclSubnet_SubnetIds,
        Check: resource.ComposeTestCheckFunc(
          testAccCheckAWSNetworkAclExists("aws_network_acl.bar", &networkAcl),

@@ -216,7 +215,7 @@ func TestAccAWSNetworkAcl_Subnets(t *testing.T) {
        ),
      },

-      resource.TestStep{
+      {
        Config: testAccAWSNetworkAclSubnet_SubnetIdsUpdate,
        Check: resource.ComposeTestCheckFunc(
          testAccCheckAWSNetworkAclExists("aws_network_acl.bar", &networkAcl),

@@ -230,6 +229,37 @@ func TestAccAWSNetworkAcl_Subnets(t *testing.T) {
  })
}

+func TestAccAWSNetworkAcl_ipv6Rules(t *testing.T) {
+  var networkAcl ec2.NetworkAcl
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:      func() { testAccPreCheck(t) },
+    IDRefreshName: "aws_network_acl.foos",
+    Providers:     testAccProviders,
+    CheckDestroy:  testAccCheckAWSNetworkAclDestroy,
+    Steps: []resource.TestStep{
+      {
+        Config: testAccAWSNetworkAclIpv6Config,
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckAWSNetworkAclExists("aws_network_acl.foos", &networkAcl),
+          resource.TestCheckResourceAttr(
+            "aws_network_acl.foos", "ingress.1976110835.protocol", "6"),
+          resource.TestCheckResourceAttr(
+            "aws_network_acl.foos", "ingress.1976110835.rule_no", "1"),
+          resource.TestCheckResourceAttr(
+            "aws_network_acl.foos", "ingress.1976110835.from_port", "0"),
+          resource.TestCheckResourceAttr(
+            "aws_network_acl.foos", "ingress.1976110835.to_port", "22"),
+          resource.TestCheckResourceAttr(
+            "aws_network_acl.foos", "ingress.1976110835.action", "allow"),
+          resource.TestCheckResourceAttr(
+            "aws_network_acl.foos", "ingress.1976110835.ipv6_cidr_block", "::/0"),
+        ),
+      },
+    },
+  })
+}
+
func TestAccAWSNetworkAcl_espProtocol(t *testing.T) {
  var networkAcl ec2.NetworkAcl

@@ -239,7 +269,7 @@ func TestAccAWSNetworkAcl_espProtocol(t *testing.T) {
    Providers:    testAccProviders,
    CheckDestroy: testAccCheckAWSNetworkAclDestroy,
    Steps: []resource.TestStep{
-      resource.TestStep{
+      {
        Config: testAccAWSNetworkAclEsp,
        Check: resource.ComposeTestCheckFunc(
          testAccCheckAWSNetworkAclExists("aws_network_acl.testesp", &networkAcl),

@@ -336,7 +366,7 @@ func testAccCheckSubnetIsAssociatedWithAcl(acl string, sub string) resource.Test
    resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{
      NetworkAclIds: []*string{aws.String(networkAcl.Primary.ID)},
      Filters: []*ec2.Filter{
-        &ec2.Filter{
+        {
          Name:   aws.String("association.subnet-id"),
          Values: []*string{aws.String(subnet.Primary.ID)},
        },

@@ -362,7 +392,7 @@ func testAccCheckSubnetIsNotAssociatedWithAcl(acl string, subnet string) resourc
    resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{
      NetworkAclIds: []*string{aws.String(networkAcl.Primary.ID)},
      Filters: []*ec2.Filter{
-        &ec2.Filter{
+        {
          Name:   aws.String("association.subnet-id"),
          Values: []*string{aws.String(subnet.Primary.ID)},
        },

@@ -379,6 +409,33 @@ func testAccCheckSubnetIsNotAssociatedWithAcl(acl string, subnet string) resourc
  }
}

+const testAccAWSNetworkAclIpv6Config = `
+resource "aws_vpc" "foo" {
+  cidr_block = "10.1.0.0/16"
+  tags {
+    Name = "TestAccAWSNetworkAcl_ipv6Rules"
+  }
+}
+resource "aws_subnet" "blob" {
+  cidr_block = "10.1.1.0/24"
+  vpc_id = "${aws_vpc.foo.id}"
+  map_public_ip_on_launch = true
+}
+resource "aws_network_acl" "foos" {
+  vpc_id = "${aws_vpc.foo.id}"
+  ingress = {
+    protocol = "tcp"
+    rule_no = 1
+    action = "allow"
+    ipv6_cidr_block = "::/0"
+    from_port = 0
+    to_port = 22
+  }
+
+  subnet_ids = ["${aws_subnet.blob.id}"]
+}
+`
+
const testAccAWSNetworkAclIngressConfig = `
resource "aws_vpc" "foo" {
  cidr_block = "10.1.0.0/16"

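Note, as an illustrative sketch only: several of the checks above switch from resource.ComposeTestCheckFunc to resource.ComposeAggregateTestCheckFunc, whose aggregate behaviour is to run every check and report all failures together instead of stopping at the first one. The stand-in below shows that behaviour with plain func() error checks; composeAggregate and the sample messages are made up for the demo (errors.Join assumes Go 1.20+).

package main

import (
	"errors"
	"fmt"
)

type check func() error

// composeAggregate runs every check and joins the failures, mirroring the
// aggregate-style composer the tests opt into above.
func composeAggregate(checks ...check) check {
	return func() error {
		var errs []error
		for _, c := range checks {
			if err := c(); err != nil {
				errs = append(errs, err)
			}
		}
		return errors.Join(errs...) // nil when every check passed
	}
}

func main() {
	all := composeAggregate(
		func() error { return nil },
		func() error { return fmt.Errorf("ingress rule_no mismatch") },
		func() error { return fmt.Errorf("egress cidr_block mismatch") },
	)
	fmt.Println(all()) // both failures reported at once
}
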
@@ -21,21 +21,21 @@ func resourceAwsOpsworksApplication() *schema.Resource {
    Update: resourceAwsOpsworksApplicationUpdate,
    Delete: resourceAwsOpsworksApplicationDelete,
    Schema: map[string]*schema.Schema{
-      "id": &schema.Schema{
+      "id": {
        Type:     schema.TypeString,
        Computed: true,
      },
-      "name": &schema.Schema{
+      "name": {
        Type:     schema.TypeString,
        Required: true,
      },
-      "short_name": &schema.Schema{
+      "short_name": {
        Type:     schema.TypeString,
        Computed: true,
        Optional: true,
      },
      // aws-flow-ruby | java | rails | php | nodejs | static | other
-      "type": &schema.Schema{
+      "type": {
        Type:     schema.TypeString,
        Required: true,
        ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {

@@ -56,62 +56,62 @@ func resourceAwsOpsworksApplication() *schema.Resource {
          return
        },
      },
-      "stack_id": &schema.Schema{
+      "stack_id": {
        Type:     schema.TypeString,
        Required: true,
      },
      // TODO: the following 4 vals are really part of the Attributes array. We should validate that only ones relevant to the chosen type are set, perhaps. (what is the default type? how do they map?)
-      "document_root": &schema.Schema{
+      "document_root": {
        Type:     schema.TypeString,
        Optional: true,
        //Default: "public",
      },
-      "rails_env": &schema.Schema{
+      "rails_env": {
        Type:     schema.TypeString,
        Optional: true,
        //Default: "production",
      },
-      "auto_bundle_on_deploy": &schema.Schema{
+      "auto_bundle_on_deploy": {
        Type:     schema.TypeString,
        Optional: true,
        //Default: true,
      },
-      "aws_flow_ruby_settings": &schema.Schema{
+      "aws_flow_ruby_settings": {
        Type:     schema.TypeString,
        Optional: true,
      },
-      "app_source": &schema.Schema{
+      "app_source": {
        Type:     schema.TypeList,
        Optional: true,
        Computed: true,
        Elem: &schema.Resource{
          Schema: map[string]*schema.Schema{
-            "type": &schema.Schema{
+            "type": {
              Type:     schema.TypeString,
              Required: true,
            },

-            "url": &schema.Schema{
+            "url": {
              Type:     schema.TypeString,
              Optional: true,
            },

-            "username": &schema.Schema{
+            "username": {
              Type:     schema.TypeString,
              Optional: true,
            },

-            "password": &schema.Schema{
+            "password": {
              Type:     schema.TypeString,
              Optional: true,
            },

-            "revision": &schema.Schema{
+            "revision": {
              Type:     schema.TypeString,
              Optional: true,
            },

-            "ssh_key": &schema.Schema{
+            "ssh_key": {
              Type:     schema.TypeString,
              Optional: true,
            },

@@ -121,41 +121,41 @@ func resourceAwsOpsworksApplication() *schema.Resource {
      // AutoSelectOpsworksMysqlInstance, OpsworksMysqlInstance, or RdsDbInstance.
      // anything beside auto select will lead into failure in case the instance doesn't exist
      // XXX: validation?
-      "data_source_type": &schema.Schema{
+      "data_source_type": {
        Type:     schema.TypeString,
        Optional: true,
      },
-      "data_source_database_name": &schema.Schema{
+      "data_source_database_name": {
        Type:     schema.TypeString,
        Optional: true,
      },
-      "data_source_arn": &schema.Schema{
+      "data_source_arn": {
        Type:     schema.TypeString,
        Optional: true,
      },
-      "description": &schema.Schema{
+      "description": {
        Type:     schema.TypeString,
        Optional: true,
      },
-      "domains": &schema.Schema{
+      "domains": {
        Type:     schema.TypeList,
        Optional: true,
        Elem:     &schema.Schema{Type: schema.TypeString},
      },
-      "environment": &schema.Schema{
+      "environment": {
        Type:     schema.TypeSet,
        Optional: true,
        Elem: &schema.Resource{
          Schema: map[string]*schema.Schema{
-            "key": &schema.Schema{
+            "key": {
              Type:     schema.TypeString,
              Required: true,
            },
-            "value": &schema.Schema{
+            "value": {
              Type:     schema.TypeString,
              Required: true,
            },
-            "secure": &schema.Schema{
+            "secure": {
              Type:     schema.TypeBool,
              Optional: true,
              Default:  true,

@@ -163,18 +163,18 @@ func resourceAwsOpsworksApplication() *schema.Resource {
          },
        },
      },
-      "enable_ssl": &schema.Schema{
+      "enable_ssl": {
        Type:     schema.TypeBool,
        Optional: true,
        Default:  false,
      },
-      "ssl_configuration": &schema.Schema{
+      "ssl_configuration": {
        Type:     schema.TypeList,
        Optional: true,
        //Computed: true,
        Elem: &schema.Resource{
          Schema: map[string]*schema.Schema{
-            "certificate": &schema.Schema{
+            "certificate": {
              Type:     schema.TypeString,
              Required: true,
              StateFunc: func(v interface{}) string {

@@ -186,7 +186,7 @@ func resourceAwsOpsworksApplication() *schema.Resource {
              }
            },
          },
-            "private_key": &schema.Schema{
+            "private_key": {
            Type:     schema.TypeString,
            Required: true,
            StateFunc: func(v interface{}) string {

@@ -198,7 +198,7 @@ func resourceAwsOpsworksApplication() *schema.Resource {
              }
            },
          },
-            "chain": &schema.Schema{
+            "chain": {
            Type:     schema.TypeString,
            Optional: true,
            StateFunc: func(v interface{}) string {

@@ -8,25 +8,30 @@ import (
  "github.com/aws/aws-sdk-go/aws"
  "github.com/aws/aws-sdk-go/aws/awserr"
  "github.com/aws/aws-sdk-go/service/opsworks"
+  "github.com/hashicorp/terraform/helper/acctest"
  "github.com/hashicorp/terraform/helper/resource"
  "github.com/hashicorp/terraform/terraform"
)

func TestAccAWSOpsworksApplication(t *testing.T) {
  var opsapp opsworks.App

+  rInt := acctest.RandInt()
+  name := fmt.Sprintf("tf-ops-acc-application-%d", rInt)
+
  resource.Test(t, resource.TestCase{
    PreCheck:     func() { testAccPreCheck(t) },
    Providers:    testAccProviders,
    CheckDestroy: testAccCheckAwsOpsworksApplicationDestroy,
    Steps: []resource.TestStep{
-      resource.TestStep{
+      {
-        Config: testAccAwsOpsworksApplicationCreate,
+        Config: testAccAwsOpsworksApplicationCreate(name),
        Check: resource.ComposeTestCheckFunc(
          testAccCheckAWSOpsworksApplicationExists(
            "aws_opsworks_application.tf-acc-app", &opsapp),
          testAccCheckAWSOpsworksCreateAppAttributes(&opsapp),
          resource.TestCheckResourceAttr(
-            "aws_opsworks_application.tf-acc-app", "name", "tf-ops-acc-application",
+            "aws_opsworks_application.tf-acc-app", "name", name,
          ),
          resource.TestCheckResourceAttr(
            "aws_opsworks_application.tf-acc-app", "type", "other",

@@ -34,14 +39,14 @@ func TestAccAWSOpsworksApplication(t *testing.T) {
          resource.TestCheckResourceAttr(
            "aws_opsworks_application.tf-acc-app", "enable_ssl", "false",
          ),
-          resource.TestCheckResourceAttr(
-            "aws_opsworks_application.tf-acc-app", "ssl_configuration", "",
+          resource.TestCheckNoResourceAttr(
+            "aws_opsworks_application.tf-acc-app", "ssl_configuration",
          ),
-          resource.TestCheckResourceAttr(
-            "aws_opsworks_application.tf-acc-app", "domains", "",
+          resource.TestCheckNoResourceAttr(
+            "aws_opsworks_application.tf-acc-app", "domains",
          ),
-          resource.TestCheckResourceAttr(
-            "aws_opsworks_application.tf-acc-app", "app_source", "",
+          resource.TestCheckNoResourceAttr(
+            "aws_opsworks_application.tf-acc-app", "app_source",
          ),
          resource.TestCheckResourceAttr(
            "aws_opsworks_application.tf-acc-app", "environment.3077298702.key", "key1",

@@ -49,22 +54,22 @@ func TestAccAWSOpsworksApplication(t *testing.T) {
          resource.TestCheckResourceAttr(
            "aws_opsworks_application.tf-acc-app", "environment.3077298702.value", "value1",
          ),
-          resource.TestCheckResourceAttr(
-            "aws_opsworks_application.tf-acc-app", "environment.3077298702.secret", "",
+          resource.TestCheckNoResourceAttr(
+            "aws_opsworks_application.tf-acc-app", "environment.3077298702.secret",
          ),
          resource.TestCheckResourceAttr(
            "aws_opsworks_application.tf-acc-app", "document_root", "foo",
          ),
        ),
      },
-      resource.TestStep{
+      {
-        Config: testAccAwsOpsworksApplicationUpdate,
+        Config: testAccAwsOpsworksApplicationUpdate(name),
        Check: resource.ComposeTestCheckFunc(
          testAccCheckAWSOpsworksApplicationExists(
            "aws_opsworks_application.tf-acc-app", &opsapp),
          testAccCheckAWSOpsworksUpdateAppAttributes(&opsapp),
          resource.TestCheckResourceAttr(
-            "aws_opsworks_application.tf-acc-app", "name", "tf-ops-acc-application",
+            "aws_opsworks_application.tf-acc-app", "name", name,
          ),
          resource.TestCheckResourceAttr(
            "aws_opsworks_application.tf-acc-app", "type", "rails",

@@ -117,8 +122,8 @@ func TestAccAWSOpsworksApplication(t *testing.T) {
          resource.TestCheckResourceAttr(
            "aws_opsworks_application.tf-acc-app", "environment.3077298702.value", "value1",
          ),
-          resource.TestCheckResourceAttr(
-            "aws_opsworks_application.tf-acc-app", "environment.3077298702.secret", "",
+          resource.TestCheckNoResourceAttr(
+            "aws_opsworks_application.tf-acc-app", "environment.3077298702.secret",
          ),
          resource.TestCheckResourceAttr(
            "aws_opsworks_application.tf-acc-app", "document_root", "root",

@@ -188,7 +193,7 @@ func testAccCheckAWSOpsworksCreateAppAttributes(
  }

  expectedEnv := []*opsworks.EnvironmentVariable{
-    &opsworks.EnvironmentVariable{
+    {
      Key:    aws.String("key1"),
      Value:  aws.String("value1"),
      Secure: aws.Bool(false),

@@ -248,12 +253,12 @@ func testAccCheckAWSOpsworksUpdateAppAttributes(
  }

  expectedEnv := []*opsworks.EnvironmentVariable{
-    &opsworks.EnvironmentVariable{
+    {
      Key:    aws.String("key2"),
      Value:  aws.String("*****FILTERED*****"),
      Secure: aws.Bool(true),
    },
-    &opsworks.EnvironmentVariable{
+    {
      Key:    aws.String("key1"),
      Value:  aws.String("value1"),
      Secure: aws.Bool(false),

@@ -308,10 +313,12 @@ func testAccCheckAwsOpsworksApplicationDestroy(s *terraform.State) error {
  return nil
}

-var testAccAwsOpsworksApplicationCreate = testAccAwsOpsworksStackConfigVpcCreate("tf-ops-acc-application") + `
+func testAccAwsOpsworksApplicationCreate(name string) string {
+  return testAccAwsOpsworksStackConfigVpcCreate(name) +
+    fmt.Sprintf(`
resource "aws_opsworks_application" "tf-acc-app" {
  stack_id = "${aws_opsworks_stack.tf-acc.id}"
-  name = "tf-ops-acc-application"
+  name = "%s"
  type = "other"
  enable_ssl = false
  app_source ={

@@ -320,12 +327,15 @@ resource "aws_opsworks_application" "tf-acc-app" {
  environment = { key = "key1" value = "value1" secure = false}
  document_root = "foo"
}
-`
+`, name)
+}

-var testAccAwsOpsworksApplicationUpdate = testAccAwsOpsworksStackConfigVpcCreate("tf-ops-acc-application") + `
+func testAccAwsOpsworksApplicationUpdate(name string) string {
+  return testAccAwsOpsworksStackConfigVpcCreate(name) +
+    fmt.Sprintf(`
resource "aws_opsworks_application" "tf-acc-app" {
  stack_id = "${aws_opsworks_stack.tf-acc.id}"
-  name = "tf-ops-acc-application"
+  name = "%s"
  type = "rails"
  domains = ["example.com", "sub.example.com"]
  enable_ssl = true

@@ -372,4 +382,5 @@ EOS
  auto_bundle_on_deploy = "true"
  rails_env = "staging"
}
-`
+`, name)
+}

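Note, as a minimal sketch and not the test framework itself: the OpsWorks application test above now derives its stack and app name from acctest.RandInt(), so repeated or concurrent acceptance runs no longer collide on account-unique names. The stand-in below only illustrates that pattern; randomName and the prefix are made up for the demo.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// randomName appends a per-run random suffix to a fixed prefix, the same idea
// as fmt.Sprintf("tf-ops-acc-application-%d", acctest.RandInt()) in the diff.
func randomName(prefix string) string {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	return fmt.Sprintf("%s-%d", prefix, r.Int())
}

func main() {
	name := randomName("tf-ops-acc-application")
	// The generated name would then be interpolated into the HCL config,
	// as testAccAwsOpsworksApplicationCreate(name) does above.
	fmt.Println(name)
}
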
@@ -24,7 +24,7 @@ func TestAccAWSOpsworksCustomLayer(t *testing.T) {
    Providers:    testAccProviders,
    CheckDestroy: testAccCheckAwsOpsworksCustomLayerDestroy,
    Steps: []resource.TestStep{
-      resource.TestStep{
+      {
        Config: testAccAwsOpsworksCustomLayerConfigNoVpcCreate(stackName),
        Check: resource.ComposeTestCheckFunc(
          testAccCheckAWSOpsworksCustomLayerExists(

@@ -74,7 +74,7 @@ func TestAccAWSOpsworksCustomLayer(t *testing.T) {
          ),
        ),
      },
-      resource.TestStep{
+      {
        Config: testAccAwsOpsworksCustomLayerConfigUpdate(stackName),
        Check: resource.ComposeTestCheckFunc(
          resource.TestCheckResourceAttr(

@@ -219,7 +219,7 @@ func testAccCheckAWSOpsworksCreateLayerAttributes(
  }

  expectedEbsVolumes := []*opsworks.VolumeConfiguration{
-    &opsworks.VolumeConfiguration{
+    {
      VolumeType:    aws.String("gp2"),
      NumberOfDisks: aws.Int64(2),
      MountPoint:    aws.String("/home"),

@@ -10,17 +10,17 @@ func resourceAwsOpsworksGangliaLayer() *schema.Resource {
    DefaultLayerName: "Ganglia",

    Attributes: map[string]*opsworksLayerTypeAttribute{
-      "url": &opsworksLayerTypeAttribute{
+      "url": {
        AttrName: "GangliaUrl",
        Type:     schema.TypeString,
        Default:  "/ganglia",
      },
-      "username": &opsworksLayerTypeAttribute{
+      "username": {
        AttrName: "GangliaUser",
        Type:     schema.TypeString,
        Default:  "opsworks",
      },
-      "password": &opsworksLayerTypeAttribute{
+      "password": {
        AttrName: "GangliaPassword",
        Type:     schema.TypeString,
        Required: true,

@@ -10,33 +10,33 @@ func resourceAwsOpsworksHaproxyLayer() *schema.Resource {
    DefaultLayerName: "HAProxy",

    Attributes: map[string]*opsworksLayerTypeAttribute{
-      "stats_enabled": &opsworksLayerTypeAttribute{
+      "stats_enabled": {
        AttrName: "EnableHaproxyStats",
        Type:     schema.TypeBool,
        Default:  true,
      },
-      "stats_url": &opsworksLayerTypeAttribute{
+      "stats_url": {
        AttrName: "HaproxyStatsUrl",
        Type:     schema.TypeString,
        Default:  "/haproxy?stats",
      },
-      "stats_user": &opsworksLayerTypeAttribute{
+      "stats_user": {
        AttrName: "HaproxyStatsUser",
        Type:     schema.TypeString,
        Default:  "opsworks",
      },
-      "stats_password": &opsworksLayerTypeAttribute{
+      "stats_password": {
        AttrName:  "HaproxyStatsPassword",
        Type:      schema.TypeString,
        WriteOnly: true,
        Required:  true,
      },
-      "healthcheck_url": &opsworksLayerTypeAttribute{
+      "healthcheck_url": {
        AttrName: "HaproxyHealthCheckUrl",
        Type:     schema.TypeString,
        Default:  "/",
      },
-      "healthcheck_method": &opsworksLayerTypeAttribute{
+      "healthcheck_method": {
        AttrName: "HaproxyHealthCheckMethod",
        Type:     schema.TypeString,
        Default:  "OPTIONS",

@@ -71,7 +71,7 @@ func TestAccAWSOpsworksInstance(t *testing.T) {
            "aws_opsworks_instance.tf-acc", "tenancy", "default",
          ),
          resource.TestCheckResourceAttr(
-            "aws_opsworks_instance.tf-acc", "os", "Amazon Linux 2014.09", // inherited from opsworks_stack_test
+            "aws_opsworks_instance.tf-acc", "os", "Amazon Linux 2016.09", // inherited from opsworks_stack_test
          ),
          resource.TestCheckResourceAttr(
            "aws_opsworks_instance.tf-acc", "root_device_type", "ebs", // inherited from opsworks_stack_test

@@ -10,27 +10,27 @@ func resourceAwsOpsworksJavaAppLayer() *schema.Resource {
    DefaultLayerName: "Java App Server",

    Attributes: map[string]*opsworksLayerTypeAttribute{
-      "jvm_type": &opsworksLayerTypeAttribute{
+      "jvm_type": {
        AttrName: "Jvm",
        Type:     schema.TypeString,
        Default:  "openjdk",
      },
-      "jvm_version": &opsworksLayerTypeAttribute{
+      "jvm_version": {
        AttrName: "JvmVersion",
        Type:     schema.TypeString,
        Default:  "7",
      },
-      "jvm_options": &opsworksLayerTypeAttribute{
+      "jvm_options": {
        AttrName: "JvmOptions",
        Type:     schema.TypeString,
        Default:  "",
      },
-      "app_server": &opsworksLayerTypeAttribute{
+      "app_server": {
        AttrName: "JavaAppServer",
        Type:     schema.TypeString,
        Default:  "tomcat",
      },
-      "app_server_version": &opsworksLayerTypeAttribute{
+      "app_server_version": {
        AttrName: "JavaAppServerVersion",
        Type:     schema.TypeString,
        Default:  "7",

@@ -10,7 +10,7 @@ func resourceAwsOpsworksMemcachedLayer() *schema.Resource {
    DefaultLayerName: "Memcached",

    Attributes: map[string]*opsworksLayerTypeAttribute{
-      "allocated_memory": &opsworksLayerTypeAttribute{
+      "allocated_memory": {
        AttrName: "MemcachedMemory",
        Type:     schema.TypeInt,
        Default:  512,

@@ -10,12 +10,12 @@ func resourceAwsOpsworksMysqlLayer() *schema.Resource {
    DefaultLayerName: "MySQL",

    Attributes: map[string]*opsworksLayerTypeAttribute{
-      "root_password": &opsworksLayerTypeAttribute{
+      "root_password": {
        AttrName:  "MysqlRootPassword",
        Type:      schema.TypeString,
        WriteOnly: true,
      },
-      "root_password_on_all_instances": &opsworksLayerTypeAttribute{
+      "root_password_on_all_instances": {
        AttrName: "MysqlRootPasswordUbiquitous",
        Type:     schema.TypeBool,
        Default:  true,

@@ -10,7 +10,7 @@ func resourceAwsOpsworksNodejsAppLayer() *schema.Resource {
    DefaultLayerName: "Node.js App Server",

    Attributes: map[string]*opsworksLayerTypeAttribute{
-      "nodejs_version": &opsworksLayerTypeAttribute{
+      "nodejs_version": {
        AttrName: "NodejsVersion",
        Type:     schema.TypeString,
        Default:  "0.10.38",

@@ -20,26 +20,26 @@ func resourceAwsOpsworksPermission() *schema.Resource {
    Read: resourceAwsOpsworksPermissionRead,

    Schema: map[string]*schema.Schema{
-      "id": &schema.Schema{
+      "id": {
        Type:     schema.TypeString,
        Computed: true,
      },
-      "allow_ssh": &schema.Schema{
+      "allow_ssh": {
        Type:     schema.TypeBool,
        Computed: true,
        Optional: true,
      },
-      "allow_sudo": &schema.Schema{
+      "allow_sudo": {
        Type:     schema.TypeBool,
        Computed: true,
        Optional: true,
      },
-      "user_arn": &schema.Schema{
+      "user_arn": {
        Type:     schema.TypeString,
        Required: true,
      },
      // one of deny, show, deploy, manage, iam_only
-      "level": &schema.Schema{
+      "level": {
        Type:     schema.TypeString,
        Computed: true,
        Optional: true,

@@ -61,7 +61,7 @@ func resourceAwsOpsworksPermission() *schema.Resource {
          return
        },
      },
-      "stack_id": &schema.Schema{
+      "stack_id": {
        Type:     schema.TypeString,
        Computed: true,
        Optional: true,

@@ -20,7 +20,7 @@ func TestAccAWSOpsworksPermission(t *testing.T) {
    Providers:    testAccProviders,
    CheckDestroy: testAccCheckAwsOpsworksPermissionDestroy,
    Steps: []resource.TestStep{
-      resource.TestStep{
+      {
        Config: testAccAwsOpsworksPermissionCreate(sName, "true", "true", "iam_only"),
        Check: resource.ComposeTestCheckFunc(
          testAccCheckAWSOpsworksPermissionExists(

@@ -37,7 +37,7 @@ func TestAccAWSOpsworksPermission(t *testing.T) {
          ),
        ),
      },
-      resource.TestStep{
+      {
        Config: testAccAwsOpsworksPermissionCreate(sName, "true", "false", "iam_only"),
        Check: resource.ComposeTestCheckFunc(
          testAccCheckAWSOpsworksPermissionExists(

@@ -54,7 +54,7 @@ func TestAccAWSOpsworksPermission(t *testing.T) {
          ),
        ),
      },
-      resource.TestStep{
+      {
        Config: testAccAwsOpsworksPermissionCreate(sName, "false", "false", "deny"),
        Check: resource.ComposeTestCheckFunc(
          testAccCheckAWSOpsworksPermissionExists(

@@ -71,7 +71,7 @@ func TestAccAWSOpsworksPermission(t *testing.T) {
          ),
        ),
      },
-      resource.TestStep{
+      {
        Config: testAccAwsOpsworksPermissionCreate(sName, "false", "false", "show"),
        Check: resource.ComposeTestCheckFunc(
          testAccCheckAWSOpsworksPermissionExists(

@@ -10,32 +10,32 @@ func resourceAwsOpsworksRailsAppLayer() *schema.Resource {
 		DefaultLayerName: "Rails App Server",

 		Attributes: map[string]*opsworksLayerTypeAttribute{
-			"ruby_version": &opsworksLayerTypeAttribute{
+			"ruby_version": {
 				AttrName: "RubyVersion",
 				Type:     schema.TypeString,
 				Default:  "2.0.0",
 			},
-			"app_server": &opsworksLayerTypeAttribute{
+			"app_server": {
 				AttrName: "RailsStack",
 				Type:     schema.TypeString,
 				Default:  "apache_passenger",
 			},
-			"passenger_version": &opsworksLayerTypeAttribute{
+			"passenger_version": {
 				AttrName: "PassengerVersion",
 				Type:     schema.TypeString,
 				Default:  "4.0.46",
 			},
-			"rubygems_version": &opsworksLayerTypeAttribute{
+			"rubygems_version": {
 				AttrName: "RubygemsVersion",
 				Type:     schema.TypeString,
 				Default:  "2.2.2",
 			},
-			"manage_bundler": &opsworksLayerTypeAttribute{
+			"manage_bundler": {
 				AttrName: "ManageBundler",
 				Type:     schema.TypeBool,
 				Default:  true,
 			},
-			"bundler_version": &opsworksLayerTypeAttribute{
+			"bundler_version": {
 				AttrName: "BundlerVersion",
 				Type:     schema.TypeString,
 				Default:  "1.5.3",

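The `resource.TestStep{` → `{` and `&opsworksLayerTypeAttribute{` → `{` rewrites above (and the matching `&schema.Schema{` rewrites in the hunks that follow) are the kind of simplification `gofmt -s` applies: when a slice or map literal already names its element type, each element may omit that type, and for pointer-valued maps the `&` is inferred as well. A minimal standalone sketch of the idiom, using stand-in types rather than the real helper packages:

package main

import "fmt"

// Stand-ins for resource.TestStep and the schema-style pointer values; the
// real types live in the terraform helper packages and are not needed to
// show the literal syntax itself.
type step struct{ Config string }
type attr struct{ Name string }

func main() {
	// Explicit element types, as in the old code.
	verbose := []step{step{Config: "a"}, step{Config: "b"}}
	verbosePtr := map[string]*attr{"id": &attr{Name: "Id"}}

	// Elided element types, as in the new code: identical values, less repetition.
	concise := []step{{Config: "a"}, {Config: "b"}}
	concisePtr := map[string]*attr{"id": {Name: "Id"}}

	fmt.Println(len(verbose) == len(concise), verbosePtr["id"].Name == concisePtr["id"].Name)
}

Both forms compile to the same values; the rest of this diff is that mechanical cleanup applied across the OpsWorks and route resources, plus the functional changes called out below.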
@@ -22,7 +22,7 @@ func TestAccAWSOpsworksRailsAppLayer(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAwsOpsworksRailsAppLayerDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAwsOpsworksRailsAppLayerConfigVpcCreate(stackName),
 				Check: resource.ComposeTestCheckFunc(
 					resource.TestCheckResourceAttr(
@@ -33,7 +33,7 @@ func TestAccAWSOpsworksRailsAppLayer(t *testing.T) {
 					),
 				),
 			},
-			resource.TestStep{
+			{
 				Config: testAccAwsOpsworksRailsAppLayerNoManageBundlerConfigVpcCreate(stackName),
 				Check: resource.ComposeTestCheckFunc(
 					resource.TestCheckResourceAttr(

@@ -20,26 +20,26 @@ func resourceAwsOpsworksRdsDbInstance() *schema.Resource {
 		Read:   resourceAwsOpsworksRdsDbInstanceRead,

 		Schema: map[string]*schema.Schema{
-			"id": &schema.Schema{
+			"id": {
 				Type:     schema.TypeString,
 				Computed: true,
 			},
-			"stack_id": &schema.Schema{
+			"stack_id": {
 				Type:     schema.TypeString,
 				Required: true,
 				ForceNew: true,
 			},
-			"rds_db_instance_arn": &schema.Schema{
+			"rds_db_instance_arn": {
 				Type:     schema.TypeString,
 				Required: true,
 				ForceNew: true,
 			},
-			"db_password": &schema.Schema{
+			"db_password": {
 				Type:      schema.TypeString,
 				Required:  true,
 				Sensitive: true,
 			},
-			"db_user": &schema.Schema{
+			"db_user": {
 				Type:     schema.TypeString,
 				Required: true,
 			},

@@ -20,7 +20,7 @@ func TestAccAWSOpsworksRdsDbInstance(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAwsOpsworksRdsDbDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAwsOpsworksRdsDbInstance(sName, "foo", "barbarbarbar"),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSOpsworksRdsDbExists(
@@ -31,7 +31,7 @@ func TestAccAWSOpsworksRdsDbInstance(t *testing.T) {
 					),
 				),
 			},
-			resource.TestStep{
+			{
 				Config: testAccAwsOpsworksRdsDbInstance(sName, "bar", "barbarbarbar"),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSOpsworksRdsDbExists(
@@ -42,7 +42,7 @@ func TestAccAWSOpsworksRdsDbInstance(t *testing.T) {
 					),
 				),
 			},
-			resource.TestStep{
+			{
 				Config: testAccAwsOpsworksRdsDbInstance(sName, "bar", "foofoofoofoofoo"),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSOpsworksRdsDbExists(
@@ -53,7 +53,7 @@ func TestAccAWSOpsworksRdsDbInstance(t *testing.T) {
 					),
 				),
 			},
-			resource.TestStep{
+			{
 				Config: testAccAwsOpsworksRdsDbInstanceForceNew(sName),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSOpsworksRdsDbExists(

@@ -25,99 +25,99 @@ func resourceAwsOpsworksStack() *schema.Resource {
 		},

 		Schema: map[string]*schema.Schema{
-			"agent_version": &schema.Schema{
+			"agent_version": {
 				Type:     schema.TypeString,
 				Optional: true,
 				Computed: true,
 			},

-			"id": &schema.Schema{
+			"id": {
 				Type:     schema.TypeString,
 				Computed: true,
 			},

-			"name": &schema.Schema{
+			"name": {
 				Type:     schema.TypeString,
 				Required: true,
 			},

-			"region": &schema.Schema{
+			"region": {
 				Type:     schema.TypeString,
 				ForceNew: true,
 				Required: true,
 			},

-			"service_role_arn": &schema.Schema{
+			"service_role_arn": {
 				Type:     schema.TypeString,
 				Required: true,
 			},

-			"default_instance_profile_arn": &schema.Schema{
+			"default_instance_profile_arn": {
 				Type:     schema.TypeString,
 				Required: true,
 			},

-			"color": &schema.Schema{
+			"color": {
 				Type:     schema.TypeString,
 				Optional: true,
 			},

-			"configuration_manager_name": &schema.Schema{
+			"configuration_manager_name": {
 				Type:     schema.TypeString,
 				Optional: true,
 				Default:  "Chef",
 			},

-			"configuration_manager_version": &schema.Schema{
+			"configuration_manager_version": {
 				Type:     schema.TypeString,
 				Optional: true,
 				Default:  "11.4",
 			},

-			"manage_berkshelf": &schema.Schema{
+			"manage_berkshelf": {
 				Type:     schema.TypeBool,
 				Optional: true,
 				Default:  false,
 			},

-			"berkshelf_version": &schema.Schema{
+			"berkshelf_version": {
 				Type:     schema.TypeString,
 				Optional: true,
 				Default:  "3.2.0",
 			},

-			"custom_cookbooks_source": &schema.Schema{
+			"custom_cookbooks_source": {
 				Type:     schema.TypeList,
 				Optional: true,
 				Computed: true,
 				Elem: &schema.Resource{
 					Schema: map[string]*schema.Schema{
-						"type": &schema.Schema{
+						"type": {
 							Type:     schema.TypeString,
 							Required: true,
 						},

-						"url": &schema.Schema{
+						"url": {
 							Type:     schema.TypeString,
 							Required: true,
 						},

-						"username": &schema.Schema{
+						"username": {
 							Type:     schema.TypeString,
 							Optional: true,
 						},

-						"password": &schema.Schema{
+						"password": {
 							Type:     schema.TypeString,
 							Optional: true,
 						},

-						"revision": &schema.Schema{
+						"revision": {
 							Type:     schema.TypeString,
 							Optional: true,
 						},

-						"ssh_key": &schema.Schema{
+						"ssh_key": {
 							Type:     schema.TypeString,
 							Optional: true,
 						},
@@ -125,58 +125,58 @@ func resourceAwsOpsworksStack() *schema.Resource {
 				},
 			},

-			"custom_json": &schema.Schema{
+			"custom_json": {
 				Type:     schema.TypeString,
 				Optional: true,
 			},

-			"default_availability_zone": &schema.Schema{
+			"default_availability_zone": {
 				Type:     schema.TypeString,
 				Optional: true,
 				Computed: true,
 			},

-			"default_os": &schema.Schema{
+			"default_os": {
 				Type:     schema.TypeString,
 				Optional: true,
 				Default:  "Ubuntu 12.04 LTS",
 			},

-			"default_root_device_type": &schema.Schema{
+			"default_root_device_type": {
 				Type:     schema.TypeString,
 				Optional: true,
 				Default:  "instance-store",
 			},

-			"default_ssh_key_name": &schema.Schema{
+			"default_ssh_key_name": {
 				Type:     schema.TypeString,
 				Optional: true,
 			},

-			"default_subnet_id": &schema.Schema{
+			"default_subnet_id": {
 				Type:     schema.TypeString,
 				Optional: true,
 			},

-			"hostname_theme": &schema.Schema{
+			"hostname_theme": {
 				Type:     schema.TypeString,
 				Optional: true,
 				Default:  "Layer_Dependent",
 			},

-			"use_custom_cookbooks": &schema.Schema{
+			"use_custom_cookbooks": {
 				Type:     schema.TypeBool,
 				Optional: true,
 				Default:  false,
 			},

-			"use_opsworks_security_groups": &schema.Schema{
+			"use_opsworks_security_groups": {
 				Type:     schema.TypeBool,
 				Optional: true,
 				Default:  true,
 			},

-			"vpc_id": &schema.Schema{
+			"vpc_id": {
 				Type:     schema.TypeString,
 				ForceNew: true,
 				Optional: true,

@@ -25,7 +25,7 @@ func TestAccAWSOpsworksStackNoVpc(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAwsOpsworksStackDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAwsOpsworksStackConfigNoVpcCreate(stackName),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSOpsworksStackExists(
@@ -36,10 +36,6 @@ func TestAccAWSOpsworksStackNoVpc(t *testing.T) {
 						"us-east-1a", stackName),
 				),
 			},
-			// resource.TestStep{
-			// 	Config: testAccAWSOpsworksStackConfigNoVpcUpdate(stackName),
-			// 	Check: testAccAwsOpsworksStackCheckResourceAttrsUpdate("us-east-1c", stackName),
-			// },
 		},
 	})
 }
@@ -52,7 +48,7 @@ func TestAccAWSOpsworksStackVpc(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAwsOpsworksStackDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAwsOpsworksStackConfigVpcCreate(stackName),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSOpsworksStackExists(
@@ -63,7 +59,7 @@ func TestAccAWSOpsworksStackVpc(t *testing.T) {
 						"us-west-2a", stackName),
 				),
 			},
-			resource.TestStep{
+			{
 				Config: testAccAWSOpsworksStackConfigVpcUpdate(stackName),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSOpsworksStackExists(
@@ -97,7 +93,7 @@ func testAccAwsOpsworksStackCheckResourceAttrsCreate(zone, stackName string) res
 		resource.TestCheckResourceAttr(
 			"aws_opsworks_stack.tf-acc",
 			"default_os",
-			"Amazon Linux 2014.09",
+			"Amazon Linux 2016.09",
 		),
 		resource.TestCheckResourceAttr(
 			"aws_opsworks_stack.tf-acc",
@@ -137,7 +133,7 @@ func testAccAwsOpsworksStackCheckResourceAttrsUpdate(zone, stackName string) res
 		resource.TestCheckResourceAttr(
 			"aws_opsworks_stack.tf-acc",
 			"default_os",
-			"Amazon Linux 2014.09",
+			"Amazon Linux 2015.09",
 		),
 		resource.TestCheckResourceAttr(
 			"aws_opsworks_stack.tf-acc",
@@ -240,7 +236,7 @@ func testAccCheckAWSOpsworksCreateStackAttributes(
 		return fmt.Errorf("Unnexpected DefaultAvailabilityZone: %s", *opsstack.DefaultAvailabilityZone)
 	}

-	if *opsstack.DefaultOs != "Amazon Linux 2014.09" {
+	if *opsstack.DefaultOs != "Amazon Linux 2016.09" {
 		return fmt.Errorf("Unnexpected stackName: %s", *opsstack.DefaultOs)
 	}

@@ -275,7 +271,7 @@ func testAccCheckAWSOpsworksUpdateStackAttributes(
 		return fmt.Errorf("Unnexpected DefaultAvailabilityZone: %s", *opsstack.DefaultAvailabilityZone)
 	}

-	if *opsstack.DefaultOs != "Amazon Linux 2014.09" {
+	if *opsstack.DefaultOs != "Amazon Linux 2015.09" {
 		return fmt.Errorf("Unnexpected stackName: %s", *opsstack.DefaultOs)
 	}

@@ -348,13 +344,16 @@ func testAccCheckAwsOpsworksStackDestroy(s *terraform.State) error {

 func testAccAwsOpsworksStackConfigNoVpcCreate(name string) string {
 	return fmt.Sprintf(`
+provider "aws" {
+	region = "us-east-1"
+}
 resource "aws_opsworks_stack" "tf-acc" {
   name = "%s"
   region = "us-east-1"
   service_role_arn = "${aws_iam_role.opsworks_service.arn}"
   default_instance_profile_arn = "${aws_iam_instance_profile.opsworks_instance.arn}"
   default_availability_zone = "us-east-1a"
-  default_os = "Amazon Linux 2014.09"
+  default_os = "Amazon Linux 2016.09"
   default_root_device_type = "ebs"
   custom_json = "{\"key\": \"value\"}"
   configuration_manager_version = "11.10"
@@ -427,95 +426,6 @@ resource "aws_iam_instance_profile" "opsworks_instance" {
 }`, name, name, name, name, name)
 }

-func testAccAWSOpsworksStackConfigNoVpcUpdate(name string) string {
-	return fmt.Sprintf(`
-resource "aws_opsworks_stack" "tf-acc" {
-  name = "%s"
-  region = "us-east-1"
-  service_role_arn = "${aws_iam_role.opsworks_service.arn}"
-  default_instance_profile_arn = "${aws_iam_instance_profile.opsworks_instance.arn}"
-  default_availability_zone = "us-east-1a"
-  default_os = "Amazon Linux 2014.09"
-  default_root_device_type = "ebs"
-  custom_json = "{\"key\": \"value\"}"
-  configuration_manager_version = "11.10"
-  use_opsworks_security_groups = false
-  use_custom_cookbooks = true
-  manage_berkshelf = true
-  custom_cookbooks_source {
-    type = "git"
-    revision = "master"
-    url = "https://github.com/aws/opsworks-example-cookbooks.git"
-    username = "example"
-    password = "example"
-  }
-resource "aws_iam_role" "opsworks_service" {
-  name = "%s_opsworks_service"
-  assume_role_policy = <<EOT
-{
-  "Version": "2008-10-17",
-  "Statement": [
-    {
-      "Sid": "",
-      "Effect": "Allow",
-      "Principal": {
-        "Service": "opsworks.amazonaws.com"
-      },
-      "Action": "sts:AssumeRole"
-    }
-  ]
-}
-EOT
-}
-
-resource "aws_iam_role_policy" "opsworks_service" {
-  name = "%s_opsworks_service"
-  role = "${aws_iam_role.opsworks_service.id}"
-  policy = <<EOT
-{
-  "Statement": [
-    {
-      "Action": [
-        "ec2:*",
-        "iam:PassRole",
-        "cloudwatch:GetMetricStatistics",
-        "elasticloadbalancing:*",
-        "rds:*"
-      ],
-      "Effect": "Allow",
-      "Resource": ["*"]
-    }
-  ]
-}
-EOT
-}
-
-resource "aws_iam_role" "opsworks_instance" {
-  name = "%s_opsworks_instance"
-  assume_role_policy = <<EOT
-{
-  "Version": "2008-10-17",
-  "Statement": [
-    {
-      "Sid": "",
-      "Effect": "Allow",
-      "Principal": {
-        "Service": "ec2.amazonaws.com"
-      },
-      "Action": "sts:AssumeRole"
-    }
-  ]
-}
-EOT
-}
-
-resource "aws_iam_instance_profile" "opsworks_instance" {
-  name = "%s_opsworks_instance"
-  roles = ["${aws_iam_role.opsworks_instance.name}"]
-}
-`, name, name, name, name, name)
-}
-
 ////////////////////////////
 //// Tests for the VPC case
 ////////////////////////////
@@ -537,7 +447,7 @@ resource "aws_opsworks_stack" "tf-acc" {
   default_subnet_id = "${aws_subnet.tf-acc.id}"
   service_role_arn = "${aws_iam_role.opsworks_service.arn}"
   default_instance_profile_arn = "${aws_iam_instance_profile.opsworks_instance.arn}"
-  default_os = "Amazon Linux 2014.09"
+  default_os = "Amazon Linux 2016.09"
   default_root_device_type = "ebs"
   custom_json = "{\"key\": \"value\"}"
   configuration_manager_version = "11.10"
@@ -628,7 +538,7 @@ resource "aws_opsworks_stack" "tf-acc" {
   default_subnet_id = "${aws_subnet.tf-acc.id}"
   service_role_arn = "${aws_iam_role.opsworks_service.arn}"
   default_instance_profile_arn = "${aws_iam_instance_profile.opsworks_instance.arn}"
-  default_os = "Amazon Linux 2014.09"
+  default_os = "Amazon Linux 2015.09"
   default_root_device_type = "ebs"
   custom_json = "{\"key\": \"value\"}"
   configuration_manager_version = "11.10"

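Every test step above composes its assertions through `resource.ComposeTestCheckFunc`, which wraps several `resource.TestCheckFunc` values into a single check that runs them in order against the same state and returns the first error. A small sketch of that pattern under the same helper packages; the resource address and attribute value here are only illustrative:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

// exampleCheck mixes a stock attribute check with a custom assertion, the way
// the OpsWorks stack tests combine TestCheckResourceAttr with their own funcs.
func exampleCheck() resource.TestCheckFunc {
	custom := func(s *terraform.State) error {
		// A custom assertion inspects the state directly.
		if _, ok := s.RootModule().Resources["aws_opsworks_stack.tf-acc"]; !ok {
			return fmt.Errorf("stack not found in state")
		}
		return nil
	}
	return resource.ComposeTestCheckFunc(
		resource.TestCheckResourceAttr("aws_opsworks_stack.tf-acc", "default_os", "Amazon Linux 2016.09"),
		custom,
	)
}

func main() {
	fmt.Println(exampleCheck() != nil)
}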
@@ -18,28 +18,29 @@ func resourceAwsOpsworksUserProfile() *schema.Resource {
 		Delete: resourceAwsOpsworksUserProfileDelete,

 		Schema: map[string]*schema.Schema{
-			"id": &schema.Schema{
+			"id": {
 				Type:     schema.TypeString,
 				Computed: true,
 			},

-			"user_arn": &schema.Schema{
+			"user_arn": {
 				Type:     schema.TypeString,
 				Required: true,
+				ForceNew: true,
 			},

-			"allow_self_management": &schema.Schema{
+			"allow_self_management": {
 				Type:     schema.TypeBool,
 				Optional: true,
 				Default:  false,
 			},

-			"ssh_username": &schema.Schema{
+			"ssh_username": {
 				Type:     schema.TypeString,
 				Required: true,
 			},

-			"ssh_public_key": &schema.Schema{
+			"ssh_public_key": {
 				Type:     schema.TypeString,
 				Optional: true,
 			},

@@ -14,14 +14,14 @@ import (

 func TestAccAWSOpsworksUserProfile(t *testing.T) {
 	rName := fmt.Sprintf("test-user-%d", acctest.RandInt())
-	roleName := fmt.Sprintf("tf-ops-user-profile-%d", acctest.RandInt())
+	updateRName := fmt.Sprintf("test-user-%d", acctest.RandInt())
 	resource.Test(t, resource.TestCase{
 		PreCheck:     func() { testAccPreCheck(t) },
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAwsOpsworksUserProfileDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
-				Config: testAccAwsOpsworksUserProfileCreate(rName, roleName),
+			{
+				Config: testAccAwsOpsworksUserProfileCreate(rName),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSOpsworksUserProfileExists(
 						"aws_opsworks_user_profile.user", rName),
@@ -36,6 +36,22 @@ func TestAccAWSOpsworksUserProfile(t *testing.T) {
 					),
 				),
 			},
+			{
+				Config: testAccAwsOpsworksUserProfileUpdate(rName, updateRName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSOpsworksUserProfileExists(
+						"aws_opsworks_user_profile.user", updateRName),
+					resource.TestCheckResourceAttr(
+						"aws_opsworks_user_profile.user", "ssh_public_key", "",
+					),
+					resource.TestCheckResourceAttr(
+						"aws_opsworks_user_profile.user", "ssh_username", updateRName,
+					),
+					resource.TestCheckResourceAttr(
+						"aws_opsworks_user_profile.user", "allow_self_management", "false",
+					),
+				),
+			},
 		},
 	})
 }
@@ -114,7 +130,7 @@ func testAccCheckAwsOpsworksUserProfileDestroy(s *terraform.State) error {
 	return nil
 }

-func testAccAwsOpsworksUserProfileCreate(rn, roleName string) string {
+func testAccAwsOpsworksUserProfileCreate(rn string) string {
 	return fmt.Sprintf(`
 resource "aws_opsworks_user_profile" "user" {
   user_arn = "${aws_iam_user.user.arn}"
@@ -125,7 +141,24 @@ resource "aws_iam_user" "user" {
   name = "%s"
   path = "/"
 }
-%s
-`, rn, testAccAwsOpsworksStackConfigNoVpcCreate(roleName))
+`, rn)
+}
+
+func testAccAwsOpsworksUserProfileUpdate(rn, updateRn string) string {
+	return fmt.Sprintf(`
+resource "aws_opsworks_user_profile" "user" {
+  user_arn = "${aws_iam_user.new-user.arn}"
+  ssh_username = "${aws_iam_user.new-user.name}"
+}
+
+resource "aws_iam_user" "user" {
+  name = "%s"
+  path = "/"
+}
+
+resource "aws_iam_user" "new-user" {
+  name = "%s"
+  path = "/"
+}
+`, rn, updateRn)
 }

@@ -16,7 +16,7 @@ import (

 // How long to sleep if a limit-exceeded event happens
 var routeTargetValidationError = errors.New("Error: more than 1 target specified. Only 1 of gateway_id, " +
-	"nat_gateway_id, instance_id, network_interface_id, route_table_id or " +
+	"egress_only_gateway_id, nat_gateway_id, instance_id, network_interface_id, route_table_id or " +
 	"vpc_peering_connection_id is allowed.")

 // AWS Route resource Schema declaration
@@ -29,62 +29,73 @@ func resourceAwsRoute() *schema.Resource {
 		Exists: resourceAwsRouteExists,

 		Schema: map[string]*schema.Schema{
-			"destination_cidr_block": &schema.Schema{
+			"destination_cidr_block": {
 				Type:     schema.TypeString,
-				Required: true,
+				Optional: true,
+				ForceNew: true,
+			},
+			"destination_ipv6_cidr_block": {
+				Type:     schema.TypeString,
+				Optional: true,
 				ForceNew: true,
 			},

-			"destination_prefix_list_id": &schema.Schema{
+			"destination_prefix_list_id": {
 				Type:     schema.TypeString,
 				Computed: true,
 			},

-			"gateway_id": &schema.Schema{
+			"gateway_id": {
 				Type:     schema.TypeString,
 				Optional: true,
 				Computed: true,
 			},

-			"nat_gateway_id": &schema.Schema{
+			"egress_only_gateway_id": {
 				Type:     schema.TypeString,
 				Optional: true,
 				Computed: true,
 			},

-			"instance_id": &schema.Schema{
+			"nat_gateway_id": {
 				Type:     schema.TypeString,
 				Optional: true,
 				Computed: true,
 			},

-			"instance_owner_id": &schema.Schema{
-				Type:     schema.TypeString,
-				Computed: true,
-			},
-
-			"network_interface_id": &schema.Schema{
+			"instance_id": {
 				Type:     schema.TypeString,
 				Optional: true,
 				Computed: true,
 			},

-			"origin": &schema.Schema{
+			"instance_owner_id": {
 				Type:     schema.TypeString,
 				Computed: true,
 			},

-			"state": &schema.Schema{
+			"network_interface_id": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+			},
+
+			"origin": {
 				Type:     schema.TypeString,
 				Computed: true,
 			},

-			"route_table_id": &schema.Schema{
+			"state": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"route_table_id": {
 				Type:     schema.TypeString,
 				Required: true,
 			},

-			"vpc_peering_connection_id": &schema.Schema{
+			"vpc_peering_connection_id": {
 				Type:     schema.TypeString,
 				Optional: true,
 			},
@@ -97,6 +108,7 @@ func resourceAwsRouteCreate(d *schema.ResourceData, meta interface{}) error {
 	var numTargets int
 	var setTarget string
 	allowedTargets := []string{
+		"egress_only_gateway_id",
 		"gateway_id",
 		"nat_gateway_id",
 		"instance_id",
@@ -125,6 +137,12 @@ func resourceAwsRouteCreate(d *schema.ResourceData, meta interface{}) error {
 			DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)),
 			GatewayId:            aws.String(d.Get("gateway_id").(string)),
 		}
+	case "egress_only_gateway_id":
+		createOpts = &ec2.CreateRouteInput{
+			RouteTableId:                aws.String(d.Get("route_table_id").(string)),
+			DestinationIpv6CidrBlock:    aws.String(d.Get("destination_ipv6_cidr_block").(string)),
+			EgressOnlyInternetGatewayId: aws.String(d.Get("egress_only_gateway_id").(string)),
+		}
 	case "nat_gateway_id":
 		createOpts = &ec2.CreateRouteInput{
 			RouteTableId: aws.String(d.Get("route_table_id").(string)),
@@ -180,13 +198,26 @@ func resourceAwsRouteCreate(d *schema.ResourceData, meta interface{}) error {
 	}

 	var route *ec2.Route

+	if v, ok := d.GetOk("destination_cidr_block"); ok {
 		err = resource.Retry(2*time.Minute, func() *resource.RetryError {
-			route, err = findResourceRoute(conn, d.Get("route_table_id").(string), d.Get("destination_cidr_block").(string))
+			route, err = findResourceRoute(conn, d.Get("route_table_id").(string), v.(string), "")
 			return resource.RetryableError(err)
 		})
 		if err != nil {
 			return fmt.Errorf("Error finding route after creating it: %s", err)
 		}
+	}
+
+	if v, ok := d.GetOk("destination_ipv6_cidr_block"); ok {
+		err = resource.Retry(2*time.Minute, func() *resource.RetryError {
+			route, err = findResourceRoute(conn, d.Get("route_table_id").(string), "", v.(string))
+			return resource.RetryableError(err)
+		})
+		if err != nil {
+			return fmt.Errorf("Error finding route after creating it: %s", err)
+		}
+	}

 	d.SetId(routeIDHash(d, route))
 	resourceAwsRouteSetResourceData(d, route)
@@ -197,7 +228,10 @@ func resourceAwsRouteRead(d *schema.ResourceData, meta interface{}) error {
 	conn := meta.(*AWSClient).ec2conn
 	routeTableId := d.Get("route_table_id").(string)

-	route, err := findResourceRoute(conn, routeTableId, d.Get("destination_cidr_block").(string))
+	destinationCidrBlock := d.Get("destination_cidr_block").(string)
+	destinationIpv6CidrBlock := d.Get("destination_ipv6_cidr_block").(string)
+
+	route, err := findResourceRoute(conn, routeTableId, destinationCidrBlock, destinationIpv6CidrBlock)
 	if err != nil {
 		if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidRouteTableID.NotFound" {
 			log.Printf("[WARN] Route Table %q could not be found. Removing Route from state.",
@@ -214,6 +248,7 @@ func resourceAwsRouteRead(d *schema.ResourceData, meta interface{}) error {
 func resourceAwsRouteSetResourceData(d *schema.ResourceData, route *ec2.Route) {
 	d.Set("destination_prefix_list_id", route.DestinationPrefixListId)
 	d.Set("gateway_id", route.GatewayId)
+	d.Set("egress_only_gateway_id", route.EgressOnlyInternetGatewayId)
 	d.Set("nat_gateway_id", route.NatGatewayId)
 	d.Set("instance_id", route.InstanceId)
 	d.Set("instance_owner_id", route.InstanceOwnerId)
@@ -229,6 +264,7 @@ func resourceAwsRouteUpdate(d *schema.ResourceData, meta interface{}) error {
 	var setTarget string

 	allowedTargets := []string{
+		"egress_only_gateway_id",
 		"gateway_id",
 		"nat_gateway_id",
 		"network_interface_id",
@@ -267,6 +303,12 @@ func resourceAwsRouteUpdate(d *schema.ResourceData, meta interface{}) error {
 			DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)),
 			GatewayId:            aws.String(d.Get("gateway_id").(string)),
 		}
+	case "egress_only_gateway_id":
+		replaceOpts = &ec2.ReplaceRouteInput{
+			RouteTableId:                aws.String(d.Get("route_table_id").(string)),
+			DestinationIpv6CidrBlock:    aws.String(d.Get("destination_ipv6_cidr_block").(string)),
+			EgressOnlyInternetGatewayId: aws.String(d.Get("egress_only_gateway_id").(string)),
+		}
 	case "nat_gateway_id":
 		replaceOpts = &ec2.ReplaceRouteInput{
 			RouteTableId: aws.String(d.Get("route_table_id").(string)),
@@ -310,7 +352,12 @@ func resourceAwsRouteDelete(d *schema.ResourceData, meta interface{}) error {

 	deleteOpts := &ec2.DeleteRouteInput{
 		RouteTableId: aws.String(d.Get("route_table_id").(string)),
-		DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)),
+	}
+	if v, ok := d.GetOk("destination_cidr_block"); ok {
+		deleteOpts.DestinationCidrBlock = aws.String(v.(string))
+	}
+	if v, ok := d.GetOk("destination_ipv6_cidr_block"); ok {
+		deleteOpts.DestinationIpv6CidrBlock = aws.String(v.(string))
 	}
 	log.Printf("[DEBUG] Route delete opts: %s", deleteOpts)

@@ -368,23 +415,37 @@ func resourceAwsRouteExists(d *schema.ResourceData, meta interface{}) (bool, err
 		return false, nil
 	}

-	cidr := d.Get("destination_cidr_block").(string)
+	if v, ok := d.GetOk("destination_cidr_block"); ok {
 		for _, route := range (*res.RouteTables[0]).Routes {
-			if route.DestinationCidrBlock != nil && *route.DestinationCidrBlock == cidr {
+			if route.DestinationCidrBlock != nil && *route.DestinationCidrBlock == v.(string) {
 				return true, nil
 			}
 		}
+	}
+
+	if v, ok := d.GetOk("destination_ipv6_cidr_block"); ok {
+		for _, route := range (*res.RouteTables[0]).Routes {
+			if route.DestinationIpv6CidrBlock != nil && *route.DestinationIpv6CidrBlock == v.(string) {
+				return true, nil
+			}
+		}
+	}

 	return false, nil
 }

 // Create an ID for a route
 func routeIDHash(d *schema.ResourceData, r *ec2.Route) string {
+
+	if r.DestinationIpv6CidrBlock != nil && *r.DestinationIpv6CidrBlock != "" {
+		return fmt.Sprintf("r-%s%d", d.Get("route_table_id").(string), hashcode.String(*r.DestinationIpv6CidrBlock))
+	}
+
 	return fmt.Sprintf("r-%s%d", d.Get("route_table_id").(string), hashcode.String(*r.DestinationCidrBlock))
 }

 // Helper: retrieve a route
-func findResourceRoute(conn *ec2.EC2, rtbid string, cidr string) (*ec2.Route, error) {
+func findResourceRoute(conn *ec2.EC2, rtbid string, cidr string, ipv6cidr string) (*ec2.Route, error) {
 	routeTableID := rtbid

 	findOpts := &ec2.DescribeRouteTablesInput{
@@ -401,6 +462,7 @@ func findResourceRoute(conn *ec2.EC2, rtbid string, cidr string) (*ec2.Route, er
 			routeTableID)
 	}

+	if cidr != "" {
 		for _, route := range (*resp.RouteTables[0]).Routes {
 			if route.DestinationCidrBlock != nil && *route.DestinationCidrBlock == cidr {
 				return route, nil
@@ -409,4 +471,20 @@ func findResourceRoute(conn *ec2.EC2, rtbid string, cidr string) (*ec2.Route, er

 		return nil, fmt.Errorf("Unable to find matching route for Route Table (%s) "+
 			"and destination CIDR block (%s).", rtbid, cidr)
+	}
+
+	if ipv6cidr != "" {
+		for _, route := range (*resp.RouteTables[0]).Routes {
+			if route.DestinationIpv6CidrBlock != nil && *route.DestinationIpv6CidrBlock == ipv6cidr {
+				return route, nil
+			}
+		}
+
+		return nil, fmt.Errorf("Unable to find matching route for Route Table (%s) "+
+			"and destination IPv6 CIDR block (%s).", rtbid, ipv6cidr)
+	}
+
+	return nil, fmt.Errorf("When trying to find a matching route for Route Table %q "+
+		"you need to specify a CIDR block of IPv6 CIDR Block", rtbid)
+
 }

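With the schema and CRUD changes above, the route resource can describe an IPv6 destination pointed at an egress-only internet gateway. A hypothetical configuration sketch, written in the same raw-string style the acceptance tests use; the resource labels and the exact shape are illustrative and not taken from this commit:

package main

import "fmt"

// exampleIpv6RouteConfig is a hypothetical configuration (not part of this
// commit) combining the new arguments: an IPv6 default route through an
// egress-only internet gateway.
const exampleIpv6RouteConfig = `
resource "aws_vpc" "example" {
  cidr_block                       = "10.1.0.0/16"
  assign_generated_ipv6_cidr_block = true
}

resource "aws_egress_only_internet_gateway" "example" {
  vpc_id = "${aws_vpc.example.id}"
}

resource "aws_route_table" "example" {
  vpc_id = "${aws_vpc.example.id}"
}

resource "aws_route" "ipv6_default" {
  route_table_id              = "${aws_route_table.example.id}"
  destination_ipv6_cidr_block = "::/0"
  egress_only_gateway_id      = "${aws_egress_only_internet_gateway.example.id}"
}
`

func main() {
	fmt.Print(exampleIpv6RouteConfig)
}

Exactly one of the IPv4 and IPv6 destination blocks is expected per route; the extended `findResourceRoute` and `routeIDHash` above key off whichever one is set.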
@@ -25,7 +25,7 @@ func resourceAwsRouteTable() *schema.Resource {
 		},

 		Schema: map[string]*schema.Schema{
-			"vpc_id": &schema.Schema{
+			"vpc_id": {
 				Type:     schema.TypeString,
 				Required: true,
 				ForceNew: true,
@@ -33,45 +33,55 @@ func resourceAwsRouteTable() *schema.Resource {

 			"tags": tagsSchema(),

-			"propagating_vgws": &schema.Schema{
+			"propagating_vgws": {
 				Type:     schema.TypeSet,
 				Optional: true,
 				Elem:     &schema.Schema{Type: schema.TypeString},
 				Set:      schema.HashString,
 			},

-			"route": &schema.Schema{
+			"route": {
 				Type:     schema.TypeSet,
 				Computed: true,
 				Optional: true,
 				Elem: &schema.Resource{
 					Schema: map[string]*schema.Schema{
-						"cidr_block": &schema.Schema{
-							Type:     schema.TypeString,
-							Required: true,
-						},
-
-						"gateway_id": &schema.Schema{
+						"cidr_block": {
 							Type:     schema.TypeString,
 							Optional: true,
 						},

-						"instance_id": &schema.Schema{
+						"ipv6_cidr_block": {
 							Type:     schema.TypeString,
 							Optional: true,
 						},

-						"nat_gateway_id": &schema.Schema{
+						"egress_only_gateway_id": {
 							Type:     schema.TypeString,
 							Optional: true,
 						},

-						"vpc_peering_connection_id": &schema.Schema{
+						"gateway_id": {
 							Type:     schema.TypeString,
 							Optional: true,
 						},

-						"network_interface_id": &schema.Schema{
+						"instance_id": {
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+
+						"nat_gateway_id": {
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+
+						"vpc_peering_connection_id": {
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+
+						"network_interface_id": {
 							Type:     schema.TypeString,
 							Optional: true,
 						},
@@ -166,6 +176,12 @@ func resourceAwsRouteTableRead(d *schema.ResourceData, meta interface{}) error {
 		if r.DestinationCidrBlock != nil {
 			m["cidr_block"] = *r.DestinationCidrBlock
 		}
+		if r.DestinationIpv6CidrBlock != nil {
+			m["ipv6_cidr_block"] = *r.DestinationIpv6CidrBlock
+		}
+		if r.EgressOnlyInternetGatewayId != nil {
+			m["egress_only_gateway_id"] = *r.EgressOnlyInternetGatewayId
+		}
 		if r.GatewayId != nil {
 			m["gateway_id"] = *r.GatewayId
 		}
@@ -266,14 +282,27 @@ func resourceAwsRouteTableUpdate(d *schema.ResourceData, meta interface{}) error
 		for _, route := range ors.List() {
 			m := route.(map[string]interface{})

-			// Delete the route as it no longer exists in the config
+			deleteOpts := &ec2.DeleteRouteInput{
+				RouteTableId: aws.String(d.Id()),
+			}
+
+			if s := m["ipv6_cidr_block"].(string); s != "" {
+				deleteOpts.DestinationIpv6CidrBlock = aws.String(s)
+
+				log.Printf(
+					"[INFO] Deleting route from %s: %s",
+					d.Id(), m["ipv6_cidr_block"].(string))
+			}
+
+			if s := m["cidr_block"].(string); s != "" {
+				deleteOpts.DestinationCidrBlock = aws.String(s)
+
 				log.Printf(
 					"[INFO] Deleting route from %s: %s",
 					d.Id(), m["cidr_block"].(string))
-			_, err := conn.DeleteRoute(&ec2.DeleteRouteInput{
-				RouteTableId:         aws.String(d.Id()),
-				DestinationCidrBlock: aws.String(m["cidr_block"].(string)),
-			})
+			}
+
+			_, err := conn.DeleteRoute(deleteOpts)
 			if err != nil {
 				return err
 			}
@@ -289,15 +318,38 @@ func resourceAwsRouteTableUpdate(d *schema.ResourceData, meta interface{}) error

 			opts := ec2.CreateRouteInput{
 				RouteTableId: aws.String(d.Id()),
-				DestinationCidrBlock:   aws.String(m["cidr_block"].(string)),
-				GatewayId:              aws.String(m["gateway_id"].(string)),
-				InstanceId:             aws.String(m["instance_id"].(string)),
-				VpcPeeringConnectionId: aws.String(m["vpc_peering_connection_id"].(string)),
-				NetworkInterfaceId:     aws.String(m["network_interface_id"].(string)),
 			}

-			if m["nat_gateway_id"].(string) != "" {
-				opts.NatGatewayId = aws.String(m["nat_gateway_id"].(string))
+			if s := m["vpc_peering_connection_id"].(string); s != "" {
+				opts.VpcPeeringConnectionId = aws.String(s)
+			}
+
+			if s := m["network_interface_id"].(string); s != "" {
+				opts.NetworkInterfaceId = aws.String(s)
+			}
+
+			if s := m["instance_id"].(string); s != "" {
+				opts.InstanceId = aws.String(s)
+			}
+
+			if s := m["ipv6_cidr_block"].(string); s != "" {
+				opts.DestinationIpv6CidrBlock = aws.String(s)
+			}
+
+			if s := m["cidr_block"].(string); s != "" {
+				opts.DestinationCidrBlock = aws.String(s)
+			}
+
+			if s := m["gateway_id"].(string); s != "" {
+				opts.GatewayId = aws.String(s)
+			}
+
+			if s := m["egress_only_gateway_id"].(string); s != "" {
+				opts.EgressOnlyInternetGatewayId = aws.String(s)
+			}
+
+			if s := m["nat_gateway_id"].(string); s != "" {
+				opts.NatGatewayId = aws.String(s)
 			}

 			log.Printf("[INFO] Creating route for %s: %#v", d.Id(), opts)
@@ -402,6 +454,10 @@ func resourceAwsRouteTableHash(v interface{}) int {
 	var buf bytes.Buffer
 	m := v.(map[string]interface{})

+	if v, ok := m["ipv6_cidr_block"]; ok {
+		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
+	}
+
 	if v, ok := m["cidr_block"]; ok {
 		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
 	}
@@ -410,6 +466,10 @@ func resourceAwsRouteTableHash(v interface{}) int {
 		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
 	}

+	if v, ok := m["egress_only_gateway_id"]; ok {
+		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
+	}
+
 	natGatewaySet := false
 	if v, ok := m["nat_gateway_id"]; ok {
 		natGatewaySet = v.(string) != ""

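The inline `route` block handling now assembles `ec2.CreateRouteInput` (and `DeleteRouteInput`) field by field, setting only the attributes that are non-empty, so an IPv6-only route no longer sends an empty IPv4 destination and vice versa. A standalone sketch of that construction pattern, assuming the same aws-sdk-go `aws` and `ec2` packages used above; the helper function and its map-of-strings input are illustrative, not code from this commit:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// buildCreateRouteInput mirrors the conditional construction used in the
// route table update path: only non-empty attributes become pointer fields.
func buildCreateRouteInput(routeTableID string, m map[string]string) *ec2.CreateRouteInput {
	opts := &ec2.CreateRouteInput{
		RouteTableId: aws.String(routeTableID),
	}
	if s := m["cidr_block"]; s != "" {
		opts.DestinationCidrBlock = aws.String(s)
	}
	if s := m["ipv6_cidr_block"]; s != "" {
		opts.DestinationIpv6CidrBlock = aws.String(s)
	}
	if s := m["gateway_id"]; s != "" {
		opts.GatewayId = aws.String(s)
	}
	if s := m["egress_only_gateway_id"]; s != "" {
		opts.EgressOnlyInternetGatewayId = aws.String(s)
	}
	if s := m["nat_gateway_id"]; s != "" {
		opts.NatGatewayId = aws.String(s)
	}
	return opts
}

func main() {
	opts := buildCreateRouteInput("rtb-12345678", map[string]string{
		"ipv6_cidr_block":        "::/0",
		"egress_only_gateway_id": "eigw-12345678",
	})
	// Only the IPv6 destination and the egress-only gateway are set here.
	fmt.Println(opts)
}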
@@ -63,7 +63,7 @@ func TestAccAWSRouteTable_basic(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckRouteTableDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccRouteTableConfig,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckRouteTableExists(
@@ -72,7 +72,7 @@ func TestAccAWSRouteTable_basic(t *testing.T) {
 				),
 			},

-			resource.TestStep{
+			{
 				Config: testAccRouteTableConfigChange,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckRouteTableExists(
@@ -113,7 +113,7 @@ func TestAccAWSRouteTable_instance(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckRouteTableDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccRouteTableConfigInstance,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckRouteTableExists(
@@ -125,6 +125,35 @@ func TestAccAWSRouteTable_instance(t *testing.T) {
 	})
 }

+func TestAccAWSRouteTable_ipv6(t *testing.T) {
+	var v ec2.RouteTable
+
+	testCheck := func(*terraform.State) error {
+		// Expect 3: 2 IPv6 (local + all outbound) + 1 IPv4
+		if len(v.Routes) != 3 {
+			return fmt.Errorf("bad routes: %#v", v.Routes)
+		}
+
+		return nil
+	}
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:      func() { testAccPreCheck(t) },
+		IDRefreshName: "aws_route_table.foo",
+		Providers:     testAccProviders,
+		CheckDestroy:  testAccCheckRouteTableDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccRouteTableConfigIpv6,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckRouteTableExists("aws_route_table.foo", &v),
+					testCheck,
+				),
+			},
+		},
+	})
+}
+
 func TestAccAWSRouteTable_tags(t *testing.T) {
 	var route_table ec2.RouteTable

@@ -134,7 +163,7 @@ func TestAccAWSRouteTable_tags(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckRouteTableDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccRouteTableConfigTags,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckRouteTableExists("aws_route_table.foo", &route_table),
@@ -142,7 +171,7 @@ func TestAccAWSRouteTable_tags(t *testing.T) {
 				),
 			},

-			resource.TestStep{
+			{
 				Config: testAccRouteTableConfigTagsUpdate,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckRouteTableExists("aws_route_table.foo", &route_table),
@@ -244,7 +273,7 @@ func TestAccAWSRouteTable_vpcPeering(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckRouteTableDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccRouteTableVpcPeeringConfig,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckRouteTableExists(
@@ -285,7 +314,7 @@ func TestAccAWSRouteTable_vgwRoutePropagation(t *testing.T) {
 			testAccCheckRouteTableDestroy,
 		),
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccRouteTableVgwRoutePropagationConfig,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckRouteTableExists(
@@ -342,6 +371,26 @@ resource "aws_route_table" "foo" {
 }
 `

+const testAccRouteTableConfigIpv6 = `
+resource "aws_vpc" "foo" {
+  cidr_block = "10.1.0.0/16"
+  assign_generated_ipv6_cidr_block = true
+}
+
+resource "aws_egress_only_internet_gateway" "foo" {
+  vpc_id = "${aws_vpc.foo.id}"
+}
+
+resource "aws_route_table" "foo" {
+  vpc_id = "${aws_vpc.foo.id}"
+
+  route {
+    ipv6_cidr_block = "::/0"
+    egress_only_gateway_id = "${aws_egress_only_internet_gateway.foo.id}"
+  }
+}
+`
+
 const testAccRouteTableConfigInstance = `
 resource "aws_vpc" "foo" {
   cidr_block = "10.1.0.0/16"

@@ -38,7 +38,7 @@ func TestAccAWSRoute_basic(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSRouteDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSRouteBasicConfig,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSRouteExists("aws_route.bar", &route),
@@ -49,6 +49,43 @@ func TestAccAWSRoute_basic(t *testing.T) {
 	})
 }
 
+func TestAccAWSRoute_ipv6Support(t *testing.T) {
+	var route ec2.Route
+
+	//aws creates a default route
+	testCheck := func(s *terraform.State) error {
+
+		name := "aws_egress_only_internet_gateway.foo"
+		gwres, ok := s.RootModule().Resources[name]
+		if !ok {
+			return fmt.Errorf("Not found: %s\n", name)
+		}
+
+		if *route.EgressOnlyInternetGatewayId != gwres.Primary.ID {
+			return fmt.Errorf("Egress Only Internet Gateway Id (Expected=%s, Actual=%s)\n", gwres.Primary.ID, *route.EgressOnlyInternetGatewayId)
+		}
+
+		return nil
+	}
+
+	resource.Test(t, resource.TestCase{
+		PreCheck: func() {
+			testAccPreCheck(t)
+		},
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSRouteDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSRouteConfigIpv6,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSRouteExists("aws_route.bar", &route),
+					testCheck,
+				),
+			},
+		},
+	})
+}
+
 func TestAccAWSRoute_changeCidr(t *testing.T) {
 	var route ec2.Route
 	var routeTable ec2.RouteTable
@@ -101,14 +138,14 @@ func TestAccAWSRoute_changeCidr(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSRouteDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSRouteBasicConfig,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSRouteExists("aws_route.bar", &route),
 					testCheck,
 				),
 			},
-			resource.TestStep{
+			{
 				Config: testAccAWSRouteBasicConfigChangeCidr,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSRouteExists("aws_route.bar", &route),
@@ -139,14 +176,14 @@ func TestAccAWSRoute_noopdiff(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSRouteDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSRouteNoopChange,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSRouteExists("aws_route.test", &route),
 					testCheck,
 				),
 			},
-			resource.TestStep{
+			{
 				Config: testAccAWSRouteNoopChange,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSRouteExists("aws_route.test", &route),
@@ -166,7 +203,7 @@ func TestAccAWSRoute_doesNotCrashWithVPCEndpoint(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSRouteDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSRouteWithVPCEndpoint,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSRouteExists("aws_route.bar", &route),
@@ -192,6 +229,7 @@ func testAccCheckAWSRouteExists(n string, res *ec2.Route) resource.TestCheckFunc
 			conn,
 			rs.Primary.Attributes["route_table_id"],
 			rs.Primary.Attributes["destination_cidr_block"],
+			rs.Primary.Attributes["destination_ipv6_cidr_block"],
 		)
 
 		if err != nil {
@@ -219,6 +257,7 @@ func testAccCheckAWSRouteDestroy(s *terraform.State) error {
 			conn,
 			rs.Primary.Attributes["route_table_id"],
 			rs.Primary.Attributes["destination_cidr_block"],
+			rs.Primary.Attributes["destination_ipv6_cidr_block"],
 		)
 
 		if route == nil && err == nil {
@@ -249,6 +288,29 @@ resource "aws_route" "bar" {
 }
 `)
 
+var testAccAWSRouteConfigIpv6 = fmt.Sprintf(`
+resource "aws_vpc" "foo" {
+  cidr_block = "10.1.0.0/16"
+  assign_generated_ipv6_cidr_block = true
+}
+
+resource "aws_egress_only_internet_gateway" "foo" {
+  vpc_id = "${aws_vpc.foo.id}"
+}
+
+resource "aws_route_table" "foo" {
+  vpc_id = "${aws_vpc.foo.id}"
+}
+
+resource "aws_route" "bar" {
+  route_table_id = "${aws_route_table.foo.id}"
+  destination_ipv6_cidr_block = "::/0"
+  egress_only_gateway_id = "${aws_egress_only_internet_gateway.foo.id}"
+}
+
+
+`)
+
 var testAccAWSRouteBasicConfigChangeCidr = fmt.Sprint(`
 resource "aws_vpc" "foo" {
   cidr_block = "10.1.0.0/16"

@@ -319,7 +319,7 @@ func resourceAwsS3BucketObjectDelete(d *schema.ResourceData, meta interface{}) e
 		}
 		_, err := s3conn.DeleteObject(&input)
 		if err != nil {
-			return fmt.Errorf("Error deleting S3 bucket object: %s", err)
+			return fmt.Errorf("Error deleting S3 bucket object: %s Bucket: %q Object: %q", err, bucket, key)
 		}
 	}
 

@@ -28,7 +28,7 @@ func resourceAwsSecurityGroup() *schema.Resource {
 		},
 
 		Schema: map[string]*schema.Schema{
-			"name": &schema.Schema{
+			"name": {
 				Type:     schema.TypeString,
 				Optional: true,
 				Computed: true,
@@ -44,7 +44,7 @@ func resourceAwsSecurityGroup() *schema.Resource {
 				},
 			},
 
-			"name_prefix": &schema.Schema{
+			"name_prefix": {
 				Type:     schema.TypeString,
 				Optional: true,
 				ForceNew: true,
@@ -58,7 +58,7 @@ func resourceAwsSecurityGroup() *schema.Resource {
 				},
 			},
 
-			"description": &schema.Schema{
+			"description": {
 				Type:     schema.TypeString,
 				Optional: true,
 				ForceNew: true,
@@ -73,49 +73,55 @@ func resourceAwsSecurityGroup() *schema.Resource {
 				},
 			},
 
-			"vpc_id": &schema.Schema{
+			"vpc_id": {
 				Type:     schema.TypeString,
 				Optional: true,
 				ForceNew: true,
 				Computed: true,
 			},
 
-			"ingress": &schema.Schema{
+			"ingress": {
 				Type:     schema.TypeSet,
 				Optional: true,
 				Computed: true,
 				Elem: &schema.Resource{
 					Schema: map[string]*schema.Schema{
-						"from_port": &schema.Schema{
+						"from_port": {
 							Type:     schema.TypeInt,
 							Required: true,
 						},
 
-						"to_port": &schema.Schema{
+						"to_port": {
 							Type:     schema.TypeInt,
 							Required: true,
 						},
 
-						"protocol": &schema.Schema{
+						"protocol": {
 							Type:      schema.TypeString,
 							Required:  true,
 							StateFunc: protocolStateFunc,
 						},
 
-						"cidr_blocks": &schema.Schema{
+						"cidr_blocks": {
 							Type:     schema.TypeList,
 							Optional: true,
 							Elem:     &schema.Schema{Type: schema.TypeString},
 						},
 
-						"security_groups": &schema.Schema{
+						"ipv6_cidr_blocks": {
+							Type:     schema.TypeList,
+							Optional: true,
+							Elem:     &schema.Schema{Type: schema.TypeString},
+						},
+
+						"security_groups": {
 							Type:     schema.TypeSet,
 							Optional: true,
 							Elem:     &schema.Schema{Type: schema.TypeString},
 							Set:      schema.HashString,
 						},
 
-						"self": &schema.Schema{
+						"self": {
 							Type:     schema.TypeBool,
 							Optional: true,
 							Default:  false,
@@ -125,48 +131,54 @@ func resourceAwsSecurityGroup() *schema.Resource {
 				Set: resourceAwsSecurityGroupRuleHash,
 			},
 
-			"egress": &schema.Schema{
+			"egress": {
 				Type:     schema.TypeSet,
 				Optional: true,
 				Computed: true,
 				Elem: &schema.Resource{
 					Schema: map[string]*schema.Schema{
-						"from_port": &schema.Schema{
+						"from_port": {
 							Type:     schema.TypeInt,
 							Required: true,
 						},
 
-						"to_port": &schema.Schema{
+						"to_port": {
 							Type:     schema.TypeInt,
 							Required: true,
 						},
 
-						"protocol": &schema.Schema{
+						"protocol": {
 							Type:      schema.TypeString,
 							Required:  true,
 							StateFunc: protocolStateFunc,
 						},
 
-						"cidr_blocks": &schema.Schema{
+						"cidr_blocks": {
 							Type:     schema.TypeList,
 							Optional: true,
 							Elem:     &schema.Schema{Type: schema.TypeString},
 						},
 
-						"prefix_list_ids": &schema.Schema{
+						"ipv6_cidr_blocks": {
 							Type:     schema.TypeList,
 							Optional: true,
 							Elem:     &schema.Schema{Type: schema.TypeString},
 						},
 
-						"security_groups": &schema.Schema{
+						"prefix_list_ids": {
+							Type:     schema.TypeList,
+							Optional: true,
+							Elem:     &schema.Schema{Type: schema.TypeString},
+						},
+
+						"security_groups": {
 							Type:     schema.TypeSet,
 							Optional: true,
 							Elem:     &schema.Schema{Type: schema.TypeString},
 							Set:      schema.HashString,
 						},
 
-						"self": &schema.Schema{
+						"self": {
 							Type:     schema.TypeBool,
 							Optional: true,
 							Default:  false,
@@ -176,7 +188,7 @@ func resourceAwsSecurityGroup() *schema.Resource {
 				Set: resourceAwsSecurityGroupRuleHash,
 			},
 
-			"owner_id": &schema.Schema{
+			"owner_id": {
 				Type:     schema.TypeString,
 				Computed: true,
 			},
@@ -252,11 +264,11 @@ func resourceAwsSecurityGroupCreate(d *schema.ResourceData, meta interface{}) er
 		req := &ec2.RevokeSecurityGroupEgressInput{
 			GroupId: createResp.GroupId,
 			IpPermissions: []*ec2.IpPermission{
-				&ec2.IpPermission{
+				{
 					FromPort: aws.Int64(int64(0)),
 					ToPort:   aws.Int64(int64(0)),
 					IpRanges: []*ec2.IpRange{
-						&ec2.IpRange{
+						{
 							CidrIp: aws.String("0.0.0.0/0"),
 						},
 					},
@@ -412,6 +424,18 @@ func resourceAwsSecurityGroupRuleHash(v interface{}) int {
 			buf.WriteString(fmt.Sprintf("%s-", v))
 		}
 	}
+	if v, ok := m["ipv6_cidr_blocks"]; ok {
+		vs := v.([]interface{})
+		s := make([]string, len(vs))
+		for i, raw := range vs {
+			s[i] = raw.(string)
+		}
+		sort.Strings(s)
+
+		for _, v := range s {
+			buf.WriteString(fmt.Sprintf("%s-", v))
+		}
+	}
 	if v, ok := m["prefix_list_ids"]; ok {
 		vs := v.([]interface{})
 		s := make([]string, len(vs))
@@ -476,6 +500,20 @@ func resourceAwsSecurityGroupIPPermGather(groupId string, permissions []*ec2.IpP
 			m["cidr_blocks"] = list
 		}
 
+		if len(perm.Ipv6Ranges) > 0 {
+			raw, ok := m["ipv6_cidr_blocks"]
+			if !ok {
+				raw = make([]string, 0, len(perm.Ipv6Ranges))
+			}
+			list := raw.([]string)
+
+			for _, ip := range perm.Ipv6Ranges {
+				list = append(list, *ip.CidrIpv6)
+			}
+
+			m["ipv6_cidr_blocks"] = list
+		}
+
 		if len(perm.PrefixListIds) > 0 {
 			raw, ok := m["prefix_list_ids"]
 			if !ok {
@@ -699,8 +737,9 @@ func matchRules(rType string, local []interface{}, remote []map[string]interface
 		// local rule we're examining
 		rHash := idHash(rType, r["protocol"].(string), r["to_port"].(int64), r["from_port"].(int64), remoteSelfVal)
 		if rHash == localHash {
-			var numExpectedCidrs, numExpectedPrefixLists, numExpectedSGs, numRemoteCidrs, numRemotePrefixLists, numRemoteSGs int
+			var numExpectedCidrs, numExpectedIpv6Cidrs, numExpectedPrefixLists, numExpectedSGs, numRemoteCidrs, numRemoteIpv6Cidrs, numRemotePrefixLists, numRemoteSGs int
 			var matchingCidrs []string
+			var matchingIpv6Cidrs []string
 			var matchingSGs []string
 			var matchingPrefixLists []string
@@ -710,6 +749,10 @@ func matchRules(rType string, local []interface{}, remote []map[string]interface
 			if ok {
 				numExpectedCidrs = len(l["cidr_blocks"].([]interface{}))
 			}
+			liRaw, ok := l["ipv6_cidr_blocks"]
+			if ok {
+				numExpectedIpv6Cidrs = len(l["ipv6_cidr_blocks"].([]interface{}))
+			}
 			lpRaw, ok := l["prefix_list_ids"]
 			if ok {
 				numExpectedPrefixLists = len(l["prefix_list_ids"].([]interface{}))
@@ -723,6 +766,10 @@ func matchRules(rType string, local []interface{}, remote []map[string]interface
 			if ok {
 				numRemoteCidrs = len(r["cidr_blocks"].([]string))
 			}
+			riRaw, ok := r["ipv6_cidr_blocks"]
+			if ok {
+				numRemoteIpv6Cidrs = len(r["ipv6_cidr_blocks"].([]string))
+			}
 			rpRaw, ok := r["prefix_list_ids"]
 			if ok {
 				numRemotePrefixLists = len(r["prefix_list_ids"].([]string))
@@ -738,6 +785,10 @@ func matchRules(rType string, local []interface{}, remote []map[string]interface
 				log.Printf("[DEBUG] Local rule has more CIDR blocks, continuing (%d/%d)", numExpectedCidrs, numRemoteCidrs)
 				continue
 			}
+			if numExpectedIpv6Cidrs > numRemoteIpv6Cidrs {
+				log.Printf("[DEBUG] Local rule has more IPV6 CIDR blocks, continuing (%d/%d)", numExpectedIpv6Cidrs, numRemoteIpv6Cidrs)
+				continue
+			}
 			if numExpectedPrefixLists > numRemotePrefixLists {
 				log.Printf("[DEBUG] Local rule has more prefix lists, continuing (%d/%d)", numExpectedPrefixLists, numRemotePrefixLists)
 				continue
@@ -775,6 +826,29 @@ func matchRules(rType string, local []interface{}, remote []map[string]interface
 				}
 			}
 
+			//IPV6 CIDRs
+			var localIpv6Cidrs []interface{}
+			if liRaw != nil {
+				localIpv6Cidrs = liRaw.([]interface{})
+			}
+			localIpv6CidrSet := schema.NewSet(schema.HashString, localIpv6Cidrs)
+
+			var remoteIpv6Cidrs []string
+			if riRaw != nil {
+				remoteIpv6Cidrs = riRaw.([]string)
+			}
+			var listIpv6 []interface{}
+			for _, s := range remoteIpv6Cidrs {
+				listIpv6 = append(listIpv6, s)
+			}
+			remoteIpv6CidrSet := schema.NewSet(schema.HashString, listIpv6)
+
+			for _, s := range localIpv6CidrSet.List() {
+				if remoteIpv6CidrSet.Contains(s) {
+					matchingIpv6Cidrs = append(matchingIpv6Cidrs, s.(string))
+				}
+			}
+
 			// match prefix lists by converting both to sets, and using Set methods
 			var localPrefixLists []interface{}
 			if lpRaw != nil {
@@ -830,6 +904,7 @@ func matchRules(rType string, local []interface{}, remote []map[string]interface
 			// match, and then remove those elements from the remote rule, so that
 			// this remote rule can still be considered by other local rules
 			if numExpectedCidrs == len(matchingCidrs) {
+				if numExpectedIpv6Cidrs == len(matchingIpv6Cidrs) {
 					if numExpectedPrefixLists == len(matchingPrefixLists) {
 						if numExpectedSGs == len(matchingSGs) {
 							// confirm that self references match
@@ -857,6 +932,21 @@ func matchRules(rType string, local []interface{}, remote []map[string]interface
 								delete(r, "cidr_blocks")
 							}
 
+							//// IPV6
+							//// Comparison
+							diffIpv6Cidr := remoteIpv6CidrSet.Difference(localIpv6CidrSet)
+							var newIpv6Cidr []string
+							for _, cRaw := range diffIpv6Cidr.List() {
+								newIpv6Cidr = append(newIpv6Cidr, cRaw.(string))
+							}
+
+							// reassigning
+							if len(newIpv6Cidr) > 0 {
+								r["ipv6_cidr_blocks"] = newIpv6Cidr
+							} else {
+								delete(r, "ipv6_cidr_blocks")
+							}
+
 							// pop local prefix lists from remote
 							diffPrefixLists := remotePrefixListsSet.Difference(localPrefixListsSet)
 							var newPrefixLists []string
@@ -883,20 +973,24 @@ func matchRules(rType string, local []interface{}, remote []map[string]interface
 							}
 						}
 					}
-				}
-			}
-		}
-	}
 
+					}
+				}
+			}
+		}
+	}
 	// Here we catch any remote rules that have not been stripped of all self,
 	// cidrs, and security groups. We'll add remote rules here that have not been
 	// matched locally, and let the graph sort things out. This will happen when
 	// rules are added externally to Terraform
 	for _, r := range remote {
-		var lenCidr, lenPrefixLists, lenSGs int
+		var lenCidr, lenIpv6Cidr, lenPrefixLists, lenSGs int
 		if rCidrs, ok := r["cidr_blocks"]; ok {
 			lenCidr = len(rCidrs.([]string))
 		}
+		if rIpv6Cidrs, ok := r["ipv6_cidr_blocks"]; ok {
+			lenIpv6Cidr = len(rIpv6Cidrs.([]string))
+		}
 		if rPrefixLists, ok := r["prefix_list_ids"]; ok {
 			lenPrefixLists = len(rPrefixLists.([]string))
 		}
@@ -910,7 +1004,7 @@ func matchRules(rType string, local []interface{}, remote []map[string]interface
 			}
 		}
 
-		if lenSGs+lenCidr+lenPrefixLists > 0 {
+		if lenSGs+lenCidr+lenIpv6Cidr+lenPrefixLists > 0 {
 			log.Printf("[DEBUG] Found a remote Rule that wasn't empty: (%#v)", r)
 			saves = append(saves, r)
 		}
@@ -1003,15 +1097,15 @@ func deleteLingeringLambdaENIs(conn *ec2.EC2, d *schema.ResourceData) error {
 	// Here we carefully find the offenders
 	params := &ec2.DescribeNetworkInterfacesInput{
 		Filters: []*ec2.Filter{
-			&ec2.Filter{
+			{
 				Name:   aws.String("group-id"),
 				Values: []*string{aws.String(d.Id())},
 			},
-			&ec2.Filter{
+			{
 				Name:   aws.String("description"),
 				Values: []*string{aws.String("AWS Lambda VPC ENI: *")},
 			},
-			&ec2.Filter{
+			{
 				Name:   aws.String("requester-id"),
 				Values: []*string{aws.String("*:awslambda_*")},
 			},

@@ -61,6 +61,13 @@ func resourceAwsSecurityGroupRule() *schema.Resource {
 				Elem:     &schema.Schema{Type: schema.TypeString},
 			},
 
+			"ipv6_cidr_blocks": {
+				Type:     schema.TypeList,
+				Optional: true,
+				ForceNew: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+
 			"prefix_list_ids": {
 				Type:     schema.TypeList,
 				Optional: true,
@@ -400,6 +407,19 @@ func findRuleMatch(p *ec2.IpPermission, rules []*ec2.IpPermission, isVPC bool) *
 			continue
 		}
 
+		remaining = len(p.Ipv6Ranges)
+		for _, ipv6 := range p.Ipv6Ranges {
+			for _, ipv6ip := range r.Ipv6Ranges {
+				if *ipv6.CidrIpv6 == *ipv6ip.CidrIpv6 {
+					remaining--
+				}
+			}
+		}
+
+		if remaining > 0 {
+			continue
+		}
+
 		remaining = len(p.PrefixListIds)
 		for _, pl := range p.PrefixListIds {
 			for _, rpl := range r.PrefixListIds {
@@ -463,6 +483,18 @@ func ipPermissionIDHash(sg_id, ruleType string, ip *ec2.IpPermission) string {
 		}
 	}
 
+	if len(ip.Ipv6Ranges) > 0 {
+		s := make([]string, len(ip.Ipv6Ranges))
+		for i, r := range ip.Ipv6Ranges {
+			s[i] = *r.CidrIpv6
+		}
+		sort.Strings(s)
+
+		for _, v := range s {
+			buf.WriteString(fmt.Sprintf("%s-", v))
+		}
+	}
+
 	if len(ip.PrefixListIds) > 0 {
 		s := make([]string, len(ip.PrefixListIds))
 		for i, pl := range ip.PrefixListIds {
@@ -555,6 +587,18 @@ func expandIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup) (*ec2.IpPermiss
 		}
 	}
 
+	if raw, ok := d.GetOk("ipv6_cidr_blocks"); ok {
+		list := raw.([]interface{})
+		perm.Ipv6Ranges = make([]*ec2.Ipv6Range, len(list))
+		for i, v := range list {
+			cidrIP, ok := v.(string)
+			if !ok {
+				return nil, fmt.Errorf("empty element found in ipv6_cidr_blocks - consider using the compact function")
+			}
+			perm.Ipv6Ranges[i] = &ec2.Ipv6Range{CidrIpv6: aws.String(cidrIP)}
+		}
+	}
+
 	if raw, ok := d.GetOk("prefix_list_ids"); ok {
 		list := raw.([]interface{})
 		perm.PrefixListIds = make([]*ec2.PrefixListId, len(list))
@@ -584,6 +628,12 @@ func setFromIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup, rule *ec2.IpPe
 
 	d.Set("cidr_blocks", cb)
 
+	var ipv6 []string
+	for _, ip := range rule.Ipv6Ranges {
+		ipv6 = append(ipv6, *ip.CidrIpv6)
+	}
+	d.Set("ipv6_cidr_blocks", ipv6)
+
 	var pl []string
 	for _, p := range rule.PrefixListIds {
 		pl = append(pl, *p.PrefixListId)
@@ -603,15 +653,16 @@ func setFromIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup, rule *ec2.IpPe
 	return nil
 }
 
-// Validates that either 'cidr_blocks', 'self', or 'source_security_group_id' is set
+// Validates that either 'cidr_blocks', 'ipv6_cidr_blocks', 'self', or 'source_security_group_id' is set
 func validateAwsSecurityGroupRule(d *schema.ResourceData) error {
 	_, blocksOk := d.GetOk("cidr_blocks")
+	_, ipv6Ok := d.GetOk("ipv6_cidr_blocks")
 	_, sourceOk := d.GetOk("source_security_group_id")
 	_, selfOk := d.GetOk("self")
 	_, prefixOk := d.GetOk("prefix_list_ids")
-	if !blocksOk && !sourceOk && !selfOk && !prefixOk {
+	if !blocksOk && !sourceOk && !selfOk && !prefixOk && !ipv6Ok {
 		return fmt.Errorf(
-			"One of ['cidr_blocks', 'self', 'source_security_group_id', 'prefix_list_ids'] must be set to create an AWS Security Group Rule")
+			"One of ['cidr_blocks', 'ipv6_cidr_blocks', 'self', 'source_security_group_id', 'prefix_list_ids'] must be set to create an AWS Security Group Rule")
 	}
 	return nil
 }

@@ -52,15 +52,15 @@ func TestIpPermissionIDHash(t *testing.T) {
 		FromPort: aws.Int64(int64(80)),
 		ToPort:   aws.Int64(int64(8000)),
 		UserIdGroupPairs: []*ec2.UserIdGroupPair{
-			&ec2.UserIdGroupPair{
+			{
 				UserId:  aws.String("987654321"),
 				GroupId: aws.String("sg-12345678"),
 			},
-			&ec2.UserIdGroupPair{
+			{
 				UserId:  aws.String("123456789"),
 				GroupId: aws.String("sg-987654321"),
 			},
-			&ec2.UserIdGroupPair{
+			{
 				UserId:  aws.String("123456789"),
 				GroupId: aws.String("sg-12345678"),
 			},
@@ -72,15 +72,15 @@ func TestIpPermissionIDHash(t *testing.T) {
 		FromPort: aws.Int64(int64(80)),
 		ToPort:   aws.Int64(int64(8000)),
 		UserIdGroupPairs: []*ec2.UserIdGroupPair{
-			&ec2.UserIdGroupPair{
+			{
 				UserId:    aws.String("987654321"),
 				GroupName: aws.String("my-security-group"),
 			},
-			&ec2.UserIdGroupPair{
+			{
 				UserId:    aws.String("123456789"),
 				GroupName: aws.String("my-security-group"),
 			},
-			&ec2.UserIdGroupPair{
+			{
 				UserId:    aws.String("123456789"),
 				GroupName: aws.String("my-other-security-group"),
 			},
@@ -183,6 +183,46 @@ func TestAccAWSSecurityGroupRule_Ingress_Protocol(t *testing.T) {
 	})
 }
 
+func TestAccAWSSecurityGroupRule_Ingress_Ipv6(t *testing.T) {
+	var group ec2.SecurityGroup
+
+	testRuleCount := func(*terraform.State) error {
+		if len(group.IpPermissions) != 1 {
+			return fmt.Errorf("Wrong Security Group rule count, expected %d, got %d",
+				1, len(group.IpPermissions))
+		}
+
+		rule := group.IpPermissions[0]
+		if *rule.FromPort != int64(80) {
+			return fmt.Errorf("Wrong Security Group port setting, expected %d, got %d",
+				80, int(*rule.FromPort))
+		}
+
+		ipv6Address := rule.Ipv6Ranges[0]
+		if *ipv6Address.CidrIpv6 != "::/0" {
+			return fmt.Errorf("Wrong Security Group IPv6 address, expected %s, got %s",
+				"::/0", *ipv6Address.CidrIpv6)
+		}
+
+		return nil
+	}
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSSecurityGroupRuleIngress_ipv6Config,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group),
+					testRuleCount,
+				),
+			},
+		},
+	})
+}
+
 func TestAccAWSSecurityGroupRule_Ingress_Classic(t *testing.T) {
 	var group ec2.SecurityGroup
 	rInt := acctest.RandInt()
@@ -376,7 +416,7 @@ func TestAccAWSSecurityGroupRule_PartialMatching_Source(t *testing.T) {
 		ToPort:     aws.Int64(80),
 		IpProtocol: aws.String("tcp"),
 		UserIdGroupPairs: []*ec2.UserIdGroupPair{
-			&ec2.UserIdGroupPair{GroupId: nat.GroupId},
+			{GroupId: nat.GroupId},
 		},
 	}
 
@@ -696,6 +736,34 @@ func testAccAWSSecurityGroupRuleIngressConfig(rInt int) string {
 }`, rInt)
 }
 
+const testAccAWSSecurityGroupRuleIngress_ipv6Config = `
+resource "aws_vpc" "tftest" {
+  cidr_block = "10.0.0.0/16"
+
+  tags {
+    Name = "tf-testing"
+  }
+}
+
+resource "aws_security_group" "web" {
+  vpc_id = "${aws_vpc.tftest.id}"
+
+  tags {
+    Name = "tf-acc-test"
+  }
+}
+
+resource "aws_security_group_rule" "ingress_1" {
+  type = "ingress"
+  protocol = "6"
+  from_port = 80
+  to_port = 8000
+  ipv6_cidr_blocks = ["::/0"]
+
+  security_group_id = "${aws_security_group.web.id}"
+}
+`
+
 const testAccAWSSecurityGroupRuleIngress_protocolConfig = `
 resource "aws_vpc" "tftest" {
   cidr_block = "10.0.0.0/16"

@@ -135,54 +135,54 @@ func TestProtocolForValue(t *testing.T) {
 
 func TestResourceAwsSecurityGroupIPPermGather(t *testing.T) {
 	raw := []*ec2.IpPermission{
-		&ec2.IpPermission{
+		{
 			IpProtocol: aws.String("tcp"),
 			FromPort:   aws.Int64(int64(1)),
 			ToPort:     aws.Int64(int64(-1)),
-			IpRanges:   []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("0.0.0.0/0")}},
+			IpRanges:   []*ec2.IpRange{{CidrIp: aws.String("0.0.0.0/0")}},
 			UserIdGroupPairs: []*ec2.UserIdGroupPair{
-				&ec2.UserIdGroupPair{
+				{
 					GroupId: aws.String("sg-11111"),
 				},
 			},
 		},
-		&ec2.IpPermission{
+		{
 			IpProtocol: aws.String("tcp"),
 			FromPort:   aws.Int64(int64(80)),
 			ToPort:     aws.Int64(int64(80)),
 			UserIdGroupPairs: []*ec2.UserIdGroupPair{
 				// VPC
-				&ec2.UserIdGroupPair{
+				{
 					GroupId: aws.String("sg-22222"),
 				},
 			},
 		},
-		&ec2.IpPermission{
+		{
 			IpProtocol: aws.String("tcp"),
 			FromPort:   aws.Int64(int64(443)),
 			ToPort:     aws.Int64(int64(443)),
 			UserIdGroupPairs: []*ec2.UserIdGroupPair{
 				// Classic
-				&ec2.UserIdGroupPair{
+				{
 					UserId:    aws.String("12345"),
 					GroupId:   aws.String("sg-33333"),
 					GroupName: aws.String("ec2_classic"),
 				},
-				&ec2.UserIdGroupPair{
+				{
 					UserId:    aws.String("amazon-elb"),
 					GroupId:   aws.String("sg-d2c979d3"),
 					GroupName: aws.String("amazon-elb-sg"),
 				},
 			},
 		},
-		&ec2.IpPermission{
+		{
 			IpProtocol:    aws.String("-1"),
 			FromPort:      aws.Int64(int64(0)),
 			ToPort:        aws.Int64(int64(0)),
-			PrefixListIds: []*ec2.PrefixListId{&ec2.PrefixListId{PrefixListId: aws.String("pl-12345678")}},
+			PrefixListIds: []*ec2.PrefixListId{{PrefixListId: aws.String("pl-12345678")}},
 			UserIdGroupPairs: []*ec2.UserIdGroupPair{
 				// VPC
-				&ec2.UserIdGroupPair{
+				{
 					GroupId: aws.String("sg-22222"),
 				},
 			},
@@ -190,14 +190,14 @@ func TestResourceAwsSecurityGroupIPPermGather(t *testing.T) {
 	}
 
 	local := []map[string]interface{}{
-		map[string]interface{}{
+		{
 			"protocol":    "tcp",
 			"from_port":   int64(1),
 			"to_port":     int64(-1),
 			"cidr_blocks": []string{"0.0.0.0/0"},
 			"self":        true,
 		},
-		map[string]interface{}{
+		{
 			"protocol":  "tcp",
 			"from_port": int64(80),
 			"to_port":   int64(80),
@@ -205,7 +205,7 @@ func TestResourceAwsSecurityGroupIPPermGather(t *testing.T) {
 				"sg-22222",
 			}),
 		},
-		map[string]interface{}{
+		{
 			"protocol":  "tcp",
 			"from_port": int64(443),
 			"to_port":   int64(443),
@@ -214,7 +214,7 @@ func TestResourceAwsSecurityGroupIPPermGather(t *testing.T) {
 				"amazon-elb/amazon-elb-sg",
 			}),
 		},
-		map[string]interface{}{
+		{
 			"protocol":  "-1",
 			"from_port": int64(0),
 			"to_port":   int64(0),
@@ -263,7 +263,7 @@ func TestAccAWSSecurityGroup_basic(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfig,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
@@ -288,6 +288,39 @@ func TestAccAWSSecurityGroup_basic(t *testing.T) {
 	})
 }
 
+func TestAccAWSSecurityGroup_ipv6(t *testing.T) {
+	var group ec2.SecurityGroup
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:      func() { testAccPreCheck(t) },
+		IDRefreshName: "aws_security_group.web",
+		Providers:     testAccProviders,
+		CheckDestroy:  testAccCheckAWSSecurityGroupDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSSecurityGroupConfigIpv6,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
+					resource.TestCheckResourceAttr(
+						"aws_security_group.web", "name", "terraform_acceptance_test_example"),
+					resource.TestCheckResourceAttr(
+						"aws_security_group.web", "description", "Used in the terraform acceptance tests"),
+					resource.TestCheckResourceAttr(
+						"aws_security_group.web", "ingress.2293451516.protocol", "tcp"),
+					resource.TestCheckResourceAttr(
+						"aws_security_group.web", "ingress.2293451516.from_port", "80"),
+					resource.TestCheckResourceAttr(
+						"aws_security_group.web", "ingress.2293451516.to_port", "8000"),
+					resource.TestCheckResourceAttr(
+						"aws_security_group.web", "ingress.2293451516.ipv6_cidr_blocks.#", "1"),
+					resource.TestCheckResourceAttr(
+						"aws_security_group.web", "ingress.2293451516.ipv6_cidr_blocks.0", "::/0"),
+				),
+			},
+		},
+	})
+}
+
 func TestAccAWSSecurityGroup_tagsCreatedFirst(t *testing.T) {
 	var group ec2.SecurityGroup
 
@@ -296,7 +329,7 @@ func TestAccAWSSecurityGroup_tagsCreatedFirst(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config:      testAccAWSSecurityGroupConfigForTagsOrdering,
 				ExpectError: regexp.MustCompile("InvalidParameterValue"),
 				Check: resource.ComposeTestCheckFunc(
@@ -318,7 +351,7 @@ func TestAccAWSSecurityGroup_namePrefix(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupPrefixNameConfig,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExists("aws_security_group.baz", &group),
@@ -353,7 +386,7 @@ func TestAccAWSSecurityGroup_self(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfigSelf,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
@@ -393,7 +426,7 @@ func TestAccAWSSecurityGroup_vpc(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfigVpc,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
@@ -446,7 +479,7 @@ func TestAccAWSSecurityGroup_vpcNegOneIngress(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfigVpcNegOneIngress,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
@@ -488,7 +521,7 @@ func TestAccAWSSecurityGroup_vpcProtoNumIngress(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfigVpcProtoNumIngress,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
@@ -521,7 +554,7 @@ func TestAccAWSSecurityGroup_MultiIngress(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfigMultiIngress,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
@@ -540,13 +573,13 @@ func TestAccAWSSecurityGroup_Change(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfig,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
 				),
 			},
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfigChange,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
@@ -566,7 +599,7 @@ func TestAccAWSSecurityGroup_generatedName(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfig_generatedName,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
@@ -596,7 +629,7 @@ func TestAccAWSSecurityGroup_DefaultEgress_VPC(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfigDefaultEgress,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExistsWithoutDefault("aws_security_group.worker"),
@@ -616,7 +649,7 @@ func TestAccAWSSecurityGroup_DefaultEgress_Classic(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfigClassic,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
@@ -634,7 +667,7 @@ func TestAccAWSSecurityGroup_drift(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfig_drift(),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
@@ -664,7 +697,7 @@ func TestAccAWSSecurityGroup_drift_complex(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfig_drift_complex(),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
@@ -773,7 +806,7 @@ func testAccCheckAWSSecurityGroupAttributes(group *ec2.SecurityGroup) resource.T
 			FromPort:   aws.Int64(80),
 			ToPort:     aws.Int64(8000),
 			IpProtocol: aws.String("tcp"),
-			IpRanges:   []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("10.0.0.0/8")}},
+			IpRanges:   []*ec2.IpRange{{CidrIp: aws.String("10.0.0.0/8")}},
 		}
 
 		if *group.GroupName != "terraform_acceptance_test_example" {
@@ -804,7 +837,7 @@ func testAccCheckAWSSecurityGroupAttributesNegOneProtocol(group *ec2.SecurityGro
 	return func(s *terraform.State) error {
 		p := &ec2.IpPermission{
 			IpProtocol: aws.String("-1"),
-			IpRanges:   []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("10.0.0.0/8")}},
+			IpRanges:   []*ec2.IpRange{{CidrIp: aws.String("10.0.0.0/8")}},
 		}
 
 		if *group.GroupName != "terraform_acceptance_test_example" {
@@ -839,7 +872,7 @@ func TestAccAWSSecurityGroup_tags(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfigTags,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExists("aws_security_group.foo", &group),
@@ -847,7 +880,7 @@ func TestAccAWSSecurityGroup_tags(t *testing.T) {
 				),
 			},
 
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfigTagsUpdate,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExists("aws_security_group.foo", &group),
@@ -868,7 +901,7 @@ func TestAccAWSSecurityGroup_CIDRandGroups(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupCombindCIDRandGroups,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExists("aws_security_group.mixed", &group),
@@ -887,7 +920,7 @@ func TestAccAWSSecurityGroup_ingressWithCidrAndSGs(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfig_ingressWithCidrAndSGs,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
@@ -913,7 +946,7 @@ func TestAccAWSSecurityGroup_ingressWithCidrAndSGs_classic(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfig_ingressWithCidrAndSGs_classic,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
@@ -938,7 +971,7 @@ func TestAccAWSSecurityGroup_egressWithPrefixList(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfigPrefixListEgress,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExists("aws_security_group.egress", &group),
@@ -1016,21 +1049,21 @@ func testAccCheckAWSSecurityGroupPrefixListAttributes(group *ec2.SecurityGroup)
 func testAccCheckAWSSecurityGroupAttributesChanged(group *ec2.SecurityGroup) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
 		p := []*ec2.IpPermission{
-			&ec2.IpPermission{
+			{
 				FromPort:   aws.Int64(80),
 				ToPort:     aws.Int64(9000),
 				IpProtocol: aws.String("tcp"),
-				IpRanges:   []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("10.0.0.0/8")}},
+				IpRanges:   []*ec2.IpRange{{CidrIp: aws.String("10.0.0.0/8")}},
 			},
-			&ec2.IpPermission{
+			{
 				FromPort:   aws.Int64(80),
 				ToPort:     aws.Int64(8000),
 				IpProtocol: aws.String("tcp"),
 				IpRanges: []*ec2.IpRange{
-					&ec2.IpRange{
+					{
 						CidrIp: aws.String("0.0.0.0/0"),
 					},
-					&ec2.IpRange{
+					{
 						CidrIp: aws.String("10.0.0.0/8"),
 					},
 				},
@@ -1109,7 +1142,7 @@ func TestAccAWSSecurityGroup_failWithDiffMismatch(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSecurityGroupConfig_failWithDiffMismatch,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSecurityGroupExists("aws_security_group.nat", &group),
@@ -1148,6 +1181,36 @@ resource "aws_security_group" "web" {
 	}
 }`
 
+const testAccAWSSecurityGroupConfigIpv6 = `
+resource "aws_vpc" "foo" {
+  cidr_block = "10.1.0.0/16"
+}
+
+resource "aws_security_group" "web" {
+  name = "terraform_acceptance_test_example"
+  description = "Used in the terraform acceptance tests"
+  vpc_id = "${aws_vpc.foo.id}"
+
+  ingress {
+    protocol = "6"
+    from_port = 80
+    to_port = 8000
+    ipv6_cidr_blocks = ["::/0"]
+  }
+
+  egress {
+    protocol = "tcp"
+    from_port = 80
+    to_port = 8000
+    ipv6_cidr_blocks = ["::/0"]
+  }
||||||
|
|
||||||
|
tags {
|
||||||
|
Name = "tf-acc-test"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
const testAccAWSSecurityGroupConfig = `
|
const testAccAWSSecurityGroupConfig = `
|
||||||
resource "aws_vpc" "foo" {
|
resource "aws_vpc" "foo" {
|
||||||
cidr_block = "10.1.0.0/16"
|
cidr_block = "10.1.0.0/16"
|
||||||
|
|
|
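A note on the recurring change in this test file: dropping `resource.TestStep`, `&ec2.IpPermission` and `&ec2.IpRange` from the element literals is the `gofmt -s` composite-literal simplification, not a behavioural change. A minimal stand-alone sketch of the same idiom (the `rule` type below is illustrative, not from the provider):

package main

import "fmt"

type rule struct {
    FromPort int
    ToPort   int
}

func main() {
    // Verbose form: the element type is spelled out for every entry.
    verbose := []*rule{
        &rule{FromPort: 80, ToPort: 9000},
    }

    // Simplified form (what gofmt -s rewrites to): the &rule{} is implied
    // by the slice's *rule element type, so each element is a bare brace literal.
    simplified := []*rule{
        {FromPort: 80, ToPort: 9000},
    }

    fmt.Println(*verbose[0] == *simplified[0]) // true: identical values
}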
@@ -2,9 +2,6 @@ package aws

 import (
   "bytes"
-  "crypto/sha1"
-  "encoding/base64"
-  "encoding/hex"
   "fmt"
   "log"
   "strconv"
@@ -29,73 +26,79 @@ func resourceAwsSpotFleetRequest() *schema.Resource {
   MigrateState: resourceAwsSpotFleetRequestMigrateState,

   Schema: map[string]*schema.Schema{
-    "iam_fleet_role": &schema.Schema{
+    "iam_fleet_role": {
       Type:     schema.TypeString,
       Required: true,
       ForceNew: true,
     },
+    "replace_unhealthy_instances": {
+      Type:     schema.TypeBool,
+      Optional: true,
+      ForceNew: true,
+      Default:  false,
+    },
     // http://docs.aws.amazon.com/sdk-for-go/api/service/ec2.html#type-SpotFleetLaunchSpecification
     // http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SpotFleetLaunchSpecification.html
-    "launch_specification": &schema.Schema{
+    "launch_specification": {
       Type:     schema.TypeSet,
       Required: true,
       ForceNew: true,
       Elem: &schema.Resource{
         Schema: map[string]*schema.Schema{
-          "vpc_security_group_ids": &schema.Schema{
+          "vpc_security_group_ids": {
            Type:     schema.TypeSet,
            Optional: true,
            Computed: true,
            Elem:     &schema.Schema{Type: schema.TypeString},
            Set:      schema.HashString,
          },
-          "associate_public_ip_address": &schema.Schema{
+          "associate_public_ip_address": {
            Type:     schema.TypeBool,
            Optional: true,
            Default:  false,
          },
-          "ebs_block_device": &schema.Schema{
+          "ebs_block_device": {
            Type:     schema.TypeSet,
            Optional: true,
            Computed: true,
            Elem: &schema.Resource{
              Schema: map[string]*schema.Schema{
-                "delete_on_termination": &schema.Schema{
+                "delete_on_termination": {
                  Type:     schema.TypeBool,
                  Optional: true,
                  Default:  true,
                  ForceNew: true,
                },
-                "device_name": &schema.Schema{
+                "device_name": {
                  Type:     schema.TypeString,
                  Required: true,
                  ForceNew: true,
                },
-                "encrypted": &schema.Schema{
+                "encrypted": {
                  Type:     schema.TypeBool,
                  Optional: true,
                  Computed: true,
                  ForceNew: true,
                },
-                "iops": &schema.Schema{
+                "iops": {
                  Type:     schema.TypeInt,
                  Optional: true,
                  Computed: true,
                  ForceNew: true,
                },
-                "snapshot_id": &schema.Schema{
+                "snapshot_id": {
                  Type:     schema.TypeString,
                  Optional: true,
                  Computed: true,
                  ForceNew: true,
                },
-                "volume_size": &schema.Schema{
+                "volume_size": {
                  Type:     schema.TypeInt,
                  Optional: true,
                  Computed: true,
                  ForceNew: true,
                },
-                "volume_type": &schema.Schema{
+                "volume_type": {
                  Type:     schema.TypeString,
                  Optional: true,
                  Computed: true,
@@ -105,18 +108,18 @@ func resourceAwsSpotFleetRequest() *schema.Resource {
              },
            },
            Set: hashEbsBlockDevice,
          },
-          "ephemeral_block_device": &schema.Schema{
+          "ephemeral_block_device": {
            Type:     schema.TypeSet,
            Optional: true,
            Computed: true,
            ForceNew: true,
            Elem: &schema.Resource{
              Schema: map[string]*schema.Schema{
-                "device_name": &schema.Schema{
+                "device_name": {
                  Type:     schema.TypeString,
                  Required: true,
                },
-                "virtual_name": &schema.Schema{
+                "virtual_name": {
                  Type:     schema.TypeString,
                  Required: true,
                },
@@ -124,7 +127,7 @@ func resourceAwsSpotFleetRequest() *schema.Resource {
            },
            Set: hashEphemeralBlockDevice,
          },
-          "root_block_device": &schema.Schema{
+          "root_block_device": {
            // TODO: This is a set because we don't support singleton
            // sub-resources today. We'll enforce that the set only ever has
            // length zero or one below. When TF gains support for
@@ -137,25 +140,25 @@ func resourceAwsSpotFleetRequest() *schema.Resource {
            // Termination flag on the block device mapping entry for the root
            // device volume." - bit.ly/ec2bdmap
            Schema: map[string]*schema.Schema{
-              "delete_on_termination": &schema.Schema{
+              "delete_on_termination": {
                Type:     schema.TypeBool,
                Optional: true,
                Default:  true,
                ForceNew: true,
              },
-              "iops": &schema.Schema{
+              "iops": {
                Type:     schema.TypeInt,
                Optional: true,
                Computed: true,
                ForceNew: true,
              },
-              "volume_size": &schema.Schema{
+              "volume_size": {
                Type:     schema.TypeInt,
                Optional: true,
                Computed: true,
                ForceNew: true,
              },
-              "volume_type": &schema.Schema{
+              "volume_type": {
                Type:     schema.TypeString,
                Optional: true,
                Computed: true,
@@ -165,73 +168,74 @@ func resourceAwsSpotFleetRequest() *schema.Resource {
            },
            Set: hashRootBlockDevice,
          },
-          "ebs_optimized": &schema.Schema{
+          "ebs_optimized": {
            Type:     schema.TypeBool,
            Optional: true,
+            Default:  false,
          },
-          "iam_instance_profile": &schema.Schema{
+          "iam_instance_profile": {
            Type:     schema.TypeString,
            ForceNew: true,
            Optional: true,
          },
-          "ami": &schema.Schema{
+          "ami": {
            Type:     schema.TypeString,
            Required: true,
            ForceNew: true,
          },
-          "instance_type": &schema.Schema{
+          "instance_type": {
            Type:     schema.TypeString,
            Required: true,
            ForceNew: true,
          },
-          "key_name": &schema.Schema{
+          "key_name": {
            Type:         schema.TypeString,
            Optional:     true,
            ForceNew:     true,
            Computed:     true,
            ValidateFunc: validateSpotFleetRequestKeyName,
          },
-          "monitoring": &schema.Schema{
+          "monitoring": {
            Type:     schema.TypeBool,
            Optional: true,
+            Default:  false,
          },
-          "placement_group": &schema.Schema{
+          "placement_group": {
            Type:     schema.TypeString,
            Optional: true,
            Computed: true,
            ForceNew: true,
          },
-          "spot_price": &schema.Schema{
+          "spot_price": {
            Type:     schema.TypeString,
            Optional: true,
            ForceNew: true,
          },
-          "user_data": &schema.Schema{
+          "user_data": {
            Type:     schema.TypeString,
            Optional: true,
            ForceNew: true,
            StateFunc: func(v interface{}) string {
              switch v.(type) {
              case string:
-                hash := sha1.Sum([]byte(v.(string)))
-                return hex.EncodeToString(hash[:])
+                return userDataHashSum(v.(string))
              default:
                return ""
              }
            },
          },
-          "weighted_capacity": &schema.Schema{
+          "weighted_capacity": {
            Type:     schema.TypeString,
            Optional: true,
            ForceNew: true,
          },
-          "subnet_id": &schema.Schema{
+          "subnet_id": {
            Type:     schema.TypeString,
            Optional: true,
            Computed: true,
            ForceNew: true,
          },
-          "availability_zone": &schema.Schema{
+          "availability_zone": {
            Type:     schema.TypeString,
            Optional: true,
            Computed: true,
@@ -242,48 +246,48 @@ func resourceAwsSpotFleetRequest() *schema.Resource {
       Set: hashLaunchSpecification,
     },
     // Everything on a spot fleet is ForceNew except target_capacity
-    "target_capacity": &schema.Schema{
+    "target_capacity": {
       Type:     schema.TypeInt,
       Required: true,
       ForceNew: false,
     },
-    "allocation_strategy": &schema.Schema{
+    "allocation_strategy": {
       Type:     schema.TypeString,
       Optional: true,
       Default:  "lowestPrice",
       ForceNew: true,
     },
-    "excess_capacity_termination_policy": &schema.Schema{
+    "excess_capacity_termination_policy": {
       Type:     schema.TypeString,
       Optional: true,
       Default:  "Default",
       ForceNew: false,
     },
-    "spot_price": &schema.Schema{
+    "spot_price": {
       Type:     schema.TypeString,
       Required: true,
       ForceNew: true,
     },
-    "terminate_instances_with_expiration": &schema.Schema{
+    "terminate_instances_with_expiration": {
       Type:     schema.TypeBool,
       Optional: true,
       ForceNew: true,
     },
-    "valid_from": &schema.Schema{
+    "valid_from": {
       Type:     schema.TypeString,
       Optional: true,
       ForceNew: true,
     },
-    "valid_until": &schema.Schema{
+    "valid_until": {
       Type:     schema.TypeString,
       Optional: true,
       ForceNew: true,
     },
-    "spot_request_state": &schema.Schema{
+    "spot_request_state": {
       Type:     schema.TypeString,
       Computed: true,
     },
-    "client_token": &schema.Schema{
+    "client_token": {
       Type:     schema.TypeString,
       Computed: true,
     },
@@ -323,8 +327,7 @@ func buildSpotFleetLaunchSpecification(d map[string]interface{}, meta interface{
   }

   if v, ok := d["user_data"]; ok {
-    opts.UserData = aws.String(
-      base64Encode([]byte(v.(string))))
+    opts.UserData = aws.String(base64Encode([]byte(v.(string))))
   }

   if v, ok := d["key_name"]; ok {
@@ -339,21 +342,11 @@ func buildSpotFleetLaunchSpecification(d map[string]interface{}, meta interface{
     opts.WeightedCapacity = aws.Float64(wc)
   }

-  var groups []*string
-  if v, ok := d["security_groups"]; ok {
-    sgs := v.(*schema.Set).List()
-    for _, v := range sgs {
-      str := v.(string)
-      groups = append(groups, aws.String(str))
-    }
-  }
-
-  var groupIds []*string
+  var securityGroupIds []*string
   if v, ok := d["vpc_security_group_ids"]; ok {
     if s := v.(*schema.Set); s.Len() > 0 {
       for _, v := range s.List() {
-        opts.SecurityGroups = append(opts.SecurityGroups, &ec2.GroupIdentifier{GroupId: aws.String(v.(string))})
-        groupIds = append(groupIds, aws.String(v.(string)))
+        securityGroupIds = append(securityGroupIds, aws.String(v.(string)))
       }
     }
   }
@@ -378,11 +371,15 @@ func buildSpotFleetLaunchSpecification(d map[string]interface{}, meta interface{
       DeleteOnTermination: aws.Bool(true),
       DeviceIndex:         aws.Int64(int64(0)),
       SubnetId:            aws.String(subnetId.(string)),
-      Groups:              groupIds,
+      Groups:              securityGroupIds,
     }

     opts.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{ni}
     opts.SubnetId = aws.String("")
+  } else {
+    for _, id := range securityGroupIds {
+      opts.SecurityGroups = append(opts.SecurityGroups, &ec2.GroupIdentifier{GroupId: id})
+    }
   }

   blockDevices, err := readSpotFleetBlockDeviceMappingsFromConfig(d, conn)
@@ -534,6 +531,7 @@ func resourceAwsSpotFleetRequestCreate(d *schema.ResourceData, meta interface{})
     TargetCapacity:                   aws.Int64(int64(d.Get("target_capacity").(int))),
     ClientToken:                      aws.String(resource.UniqueId()),
     TerminateInstancesWithExpiration: aws.Bool(d.Get("terminate_instances_with_expiration").(bool)),
+    ReplaceUnhealthyInstances:        aws.Bool(d.Get("replace_unhealthy_instances").(bool)),
   }

   if v, ok := d.GetOk("excess_capacity_termination_policy"); ok {
@@ -725,29 +723,26 @@ func resourceAwsSpotFleetRequestRead(d *schema.ResourceData, meta interface{}) e
       aws.TimeValue(config.ValidUntil).Format(awsAutoscalingScheduleTimeLayout))
   }

+  d.Set("replace_unhealthy_instances", config.ReplaceUnhealthyInstances)
   d.Set("launch_specification", launchSpecsToSet(config.LaunchSpecifications, conn))

   return nil
 }

-func launchSpecsToSet(ls []*ec2.SpotFleetLaunchSpecification, conn *ec2.EC2) *schema.Set {
-  specs := &schema.Set{F: hashLaunchSpecification}
-  for _, val := range ls {
-    dn, err := fetchRootDeviceName(aws.StringValue(val.ImageId), conn)
+func launchSpecsToSet(launchSpecs []*ec2.SpotFleetLaunchSpecification, conn *ec2.EC2) *schema.Set {
+  specSet := &schema.Set{F: hashLaunchSpecification}
+  for _, spec := range launchSpecs {
+    rootDeviceName, err := fetchRootDeviceName(aws.StringValue(spec.ImageId), conn)
     if err != nil {
       log.Panic(err)
-    } else {
-      ls := launchSpecToMap(val, dn)
-      specs.Add(ls)
     }
+
+    specSet.Add(launchSpecToMap(spec, rootDeviceName))
   }
-  return specs
+  return specSet
 }

-func launchSpecToMap(
-  l *ec2.SpotFleetLaunchSpecification,
-  rootDevName *string,
-) map[string]interface{} {
+func launchSpecToMap(l *ec2.SpotFleetLaunchSpecification, rootDevName *string) map[string]interface{} {
   m := make(map[string]interface{})

   m["root_block_device"] = rootBlockDeviceToSet(l.BlockDeviceMappings, rootDevName)
@@ -779,10 +774,7 @@ func launchSpecToMap(
   }

   if l.UserData != nil {
-    ud_dec, err := base64.StdEncoding.DecodeString(aws.StringValue(l.UserData))
-    if err == nil {
-      m["user_data"] = string(ud_dec)
-    }
+    m["user_data"] = userDataHashSum(aws.StringValue(l.UserData))
   }

   if l.KeyName != nil {
@@ -797,11 +789,23 @@ func launchSpecToMap(
     m["subnet_id"] = aws.StringValue(l.SubnetId)
   }

+  securityGroupIds := &schema.Set{F: schema.HashString}
+  if len(l.NetworkInterfaces) > 0 {
+    // This resource auto-creates one network interface when associate_public_ip_address is true
+    for _, group := range l.NetworkInterfaces[0].Groups {
+      securityGroupIds.Add(aws.StringValue(group))
+    }
+  } else {
+    for _, group := range l.SecurityGroups {
+      securityGroupIds.Add(aws.StringValue(group.GroupId))
+    }
+  }
+  m["vpc_security_group_ids"] = securityGroupIds
+
   if l.WeightedCapacity != nil {
     m["weighted_capacity"] = strconv.FormatFloat(*l.WeightedCapacity, 'f', 0, 64)
   }

-  // m["security_groups"] = securityGroupsToSet(l.SecutiryGroups)
   return m
 }

@@ -1009,7 +1013,6 @@ func hashLaunchSpecification(v interface{}) int {
   }
   buf.WriteString(fmt.Sprintf("%s-", m["instance_type"].(string)))
   buf.WriteString(fmt.Sprintf("%s-", m["spot_price"].(string)))
-  buf.WriteString(fmt.Sprintf("%s-", m["user_data"].(string)))
   return hashcode.String(buf.String())
 }
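The user_data handling above now funnels through userDataHashSum, so both the schema StateFunc and launchSpecToMap store a hash rather than the raw or base64-decoded payload, which keeps plain and pre-encoded input from diffing against each other. The helper's body is not part of this hunk; the sketch below shows one plausible implementation of that shape (decode base64 when possible, then hex-encode the SHA-1) and is an assumption rather than the provider's exact code:

package main

import (
    "crypto/sha1"
    "encoding/base64"
    "encoding/hex"
    "fmt"
)

// Hypothetical stand-in for userDataHashSum: hash the decoded payload so
// that plain text and its base64 form collapse to the same state value.
func userDataHashSum(userData string) string {
    v, err := base64.StdEncoding.DecodeString(userData)
    if err != nil {
        v = []byte(userData)
    }
    hash := sha1.Sum(v)
    return hex.EncodeToString(hash[:])
}

func main() {
    plain := "#!/bin/bash\necho hello"
    encoded := base64.StdEncoding.EncodeToString([]byte(plain))
    fmt.Println(userDataHashSum(plain) == userDataHashSum(encoded)) // true
}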
@@ -100,9 +100,9 @@ func TestAccAWSSpotFleetRequest_lowestPriceAzInGivenList(t *testing.T) {
         resource.TestCheckResourceAttr(
           "aws_spot_fleet_request.foo", "launch_specification.#", "2"),
         resource.TestCheckResourceAttr(
-          "aws_spot_fleet_request.foo", "launch_specification.1590006269.availability_zone", "us-west-2a"),
+          "aws_spot_fleet_request.foo", "launch_specification.335709043.availability_zone", "us-west-2a"),
         resource.TestCheckResourceAttr(
-          "aws_spot_fleet_request.foo", "launch_specification.3809475891.availability_zone", "us-west-2b"),
+          "aws_spot_fleet_request.foo", "launch_specification.1671188867.availability_zone", "us-west-2b"),
       ),
     },
   },
@@ -154,13 +154,13 @@ func TestAccAWSSpotFleetRequest_multipleInstanceTypesInSameAz(t *testing.T) {
         resource.TestCheckResourceAttr(
           "aws_spot_fleet_request.foo", "launch_specification.#", "2"),
         resource.TestCheckResourceAttr(
-          "aws_spot_fleet_request.foo", "launch_specification.1590006269.instance_type", "m1.small"),
+          "aws_spot_fleet_request.foo", "launch_specification.335709043.instance_type", "m1.small"),
         resource.TestCheckResourceAttr(
-          "aws_spot_fleet_request.foo", "launch_specification.1590006269.availability_zone", "us-west-2a"),
+          "aws_spot_fleet_request.foo", "launch_specification.335709043.availability_zone", "us-west-2a"),
         resource.TestCheckResourceAttr(
-          "aws_spot_fleet_request.foo", "launch_specification.3079734941.instance_type", "m3.large"),
+          "aws_spot_fleet_request.foo", "launch_specification.590403189.instance_type", "m3.large"),
         resource.TestCheckResourceAttr(
-          "aws_spot_fleet_request.foo", "launch_specification.3079734941.availability_zone", "us-west-2a"),
+          "aws_spot_fleet_request.foo", "launch_specification.590403189.availability_zone", "us-west-2a"),
       ),
     },
   },
@@ -214,13 +214,13 @@ func TestAccAWSSpotFleetRequest_overriddingSpotPrice(t *testing.T) {
         resource.TestCheckResourceAttr(
           "aws_spot_fleet_request.foo", "launch_specification.#", "2"),
         resource.TestCheckResourceAttr(
-          "aws_spot_fleet_request.foo", "launch_specification.522395050.spot_price", "0.01"),
+          "aws_spot_fleet_request.foo", "launch_specification.4143232216.spot_price", "0.01"),
         resource.TestCheckResourceAttr(
-          "aws_spot_fleet_request.foo", "launch_specification.522395050.instance_type", "m3.large"),
+          "aws_spot_fleet_request.foo", "launch_specification.4143232216.instance_type", "m3.large"),
         resource.TestCheckResourceAttr(
-          "aws_spot_fleet_request.foo", "launch_specification.1590006269.spot_price", ""), //there will not be a value here since it's not overriding
+          "aws_spot_fleet_request.foo", "launch_specification.335709043.spot_price", ""), //there will not be a value here since it's not overriding
         resource.TestCheckResourceAttr(
-          "aws_spot_fleet_request.foo", "launch_specification.1590006269.instance_type", "m1.small"),
+          "aws_spot_fleet_request.foo", "launch_specification.335709043.instance_type", "m1.small"),
       ),
     },
   },
@@ -289,13 +289,13 @@ func TestAccAWSSpotFleetRequest_withWeightedCapacity(t *testing.T) {
         resource.TestCheckResourceAttr(
           "aws_spot_fleet_request.foo", "launch_specification.#", "2"),
         resource.TestCheckResourceAttr(
-          "aws_spot_fleet_request.foo", "launch_specification.2325690000.weighted_capacity", "3"),
+          "aws_spot_fleet_request.foo", "launch_specification.4120185872.weighted_capacity", "3"),
         resource.TestCheckResourceAttr(
-          "aws_spot_fleet_request.foo", "launch_specification.2325690000.instance_type", "r3.large"),
+          "aws_spot_fleet_request.foo", "launch_specification.4120185872.instance_type", "r3.large"),
         resource.TestCheckResourceAttr(
-          "aws_spot_fleet_request.foo", "launch_specification.3079734941.weighted_capacity", "6"),
+          "aws_spot_fleet_request.foo", "launch_specification.590403189.weighted_capacity", "6"),
         resource.TestCheckResourceAttr(
-          "aws_spot_fleet_request.foo", "launch_specification.3079734941.instance_type", "m3.large"),
+          "aws_spot_fleet_request.foo", "launch_specification.590403189.instance_type", "m3.large"),
       ),
     },
   },
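The renumbered launch_specification keys in these assertions follow from the hashLaunchSpecification change above: a schema.TypeSet element is addressed by the hash of whatever its Set function writes into a buffer, so dropping user_data from that buffer gives every element a new numeric index. A rough sketch of how such an index is produced (the two-field hash below is a simplified stand-in for the real hashLaunchSpecification; helper/hashcode is the actual Terraform helper):

package main

import (
    "bytes"
    "fmt"

    "github.com/hashicorp/terraform/helper/hashcode"
)

// Simplified set-hash: only instance_type and spot_price feed the key.
func simplifiedLaunchSpecHash(instanceType, spotPrice string) int {
    var buf bytes.Buffer
    buf.WriteString(fmt.Sprintf("%s-", instanceType))
    buf.WriteString(fmt.Sprintf("%s-", spotPrice))
    return hashcode.String(buf.String())
}

func main() {
    // Change any hashed field (or the set of hashed fields) and the
    // element shows up under a different numeric key in state.
    fmt.Println(simplifiedLaunchSpecHash("m1.small", "0.01"))
    fmt.Println(simplifiedLaunchSpecHash("m1.small", ""))
}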
@@ -266,6 +266,29 @@ func readInstance(d *schema.ResourceData, meta interface{}) error {
   if err := readBlockDevices(d, instance, conn); err != nil {
     return err
   }
+
+  var ipv6Addresses []string
+  if len(instance.NetworkInterfaces) > 0 {
+    for _, ni := range instance.NetworkInterfaces {
+      if *ni.Attachment.DeviceIndex == 0 {
+        d.Set("subnet_id", ni.SubnetId)
+        d.Set("network_interface_id", ni.NetworkInterfaceId)
+        d.Set("associate_public_ip_address", ni.Association != nil)
+        d.Set("ipv6_address_count", len(ni.Ipv6Addresses))
+
+        for _, address := range ni.Ipv6Addresses {
+          ipv6Addresses = append(ipv6Addresses, *address.Ipv6Address)
+        }
+      }
+    }
+  } else {
+    d.Set("subnet_id", instance.SubnetId)
+    d.Set("network_interface_id", "")
+  }
+
+  if err := d.Set("ipv6_addresses", ipv6Addresses); err != nil {
+    log.Printf("[WARN] Error setting ipv6_addresses for AWS Spot Instance (%s): %s", d.Id(), err)
+  }
 }

 return nil
@@ -77,6 +77,25 @@ func resourceAwsVolumeAttachmentCreate(d *schema.ResourceData, meta interface{})

   vols, err := conn.DescribeVolumes(request)
   if (err != nil) || (len(vols.Volumes) == 0) {
+    // This handles the situation where the instance is created by
+    // a spot request and whilst the request has been fulfilled the
+    // instance is not running yet
+    stateConf := &resource.StateChangeConf{
+      Pending:    []string{"pending"},
+      Target:     []string{"running"},
+      Refresh:    InstanceStateRefreshFunc(conn, iID),
+      Timeout:    10 * time.Minute,
+      Delay:      10 * time.Second,
+      MinTimeout: 3 * time.Second,
+    }
+
+    _, err = stateConf.WaitForState()
+    if err != nil {
+      return fmt.Errorf(
+        "Error waiting for instance (%s) to become ready: %s",
+        iID, err)
+    }
+
     // not attached
     opts := &ec2.AttachVolumeInput{
       Device: aws.String(name),
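The wait added here is the standard helper/resource polling pattern: refresh until the instance leaves the pending state, hits the target state, or the timeout expires. A self-contained sketch of the same pattern with a fake refresh function standing in for InstanceStateRefreshFunc (which lives elsewhere in the provider); the instance ID and timings are illustrative:

package main

import (
    "fmt"
    "time"

    "github.com/hashicorp/terraform/helper/resource"
)

func main() {
    checks := 0
    stateConf := &resource.StateChangeConf{
        Pending: []string{"pending"},
        Target:  []string{"running"},
        // Stand-in for InstanceStateRefreshFunc: report "pending" twice,
        // then "running".
        Refresh: func() (interface{}, string, error) {
            checks++
            if checks < 3 {
                return "i-12345678", "pending", nil
            }
            return "i-12345678", "running", nil
        },
        Timeout:    1 * time.Minute,
        Delay:      100 * time.Millisecond,
        MinTimeout: 100 * time.Millisecond,
    }

    result, err := stateConf.WaitForState()
    if err != nil {
        fmt.Println("error waiting for instance:", err)
        return
    }
    fmt.Println("instance ready:", result)
}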
@@ -216,6 +216,12 @@ func expandIPPerms(
       perm.IpRanges = append(perm.IpRanges, &ec2.IpRange{CidrIp: aws.String(v.(string))})
     }
   }
+  if raw, ok := m["ipv6_cidr_blocks"]; ok {
+    list := raw.([]interface{})
+    for _, v := range list {
+      perm.Ipv6Ranges = append(perm.Ipv6Ranges, &ec2.Ipv6Range{CidrIpv6: aws.String(v.(string))})
+    }
+  }

   if raw, ok := m["prefix_list_ids"]; ok {
     list := raw.([]interface{})
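For context on what the new ipv6_cidr_blocks branch produces: each CIDR becomes an ec2.Ipv6Range attached to the permission that the provider later sends to the EC2 authorize/revoke calls. A small sketch building the same structure outside expandIPPerms (the ports and CIDRs are illustrative):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
    // What the schema hands over for ipv6_cidr_blocks: []interface{} of strings.
    raw := []interface{}{"::/0", "2001:db8::/32"}

    perm := &ec2.IpPermission{
        IpProtocol: aws.String("tcp"),
        FromPort:   aws.Int64(80),
        ToPort:     aws.Int64(8000),
    }
    for _, v := range raw {
        perm.Ipv6Ranges = append(perm.Ipv6Ranges,
            &ec2.Ipv6Range{CidrIpv6: aws.String(v.(string))})
    }

    // perm would then travel in the IpPermissions of an authorize/revoke request.
    fmt.Println(perm)
}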
@@ -2,6 +2,8 @@ package aws

 import (
   "encoding/base64"
+  "encoding/json"
+  "reflect"
   "regexp"
 )

@@ -24,3 +26,17 @@ func isBase64Encoded(data []byte) bool {
 func looksLikeJsonString(s interface{}) bool {
   return regexp.MustCompile(`^\s*{`).MatchString(s.(string))
 }
+
+func jsonBytesEqual(b1, b2 []byte) bool {
+  var o1 interface{}
+  if err := json.Unmarshal(b1, &o1); err != nil {
+    return false
+  }
+
+  var o2 interface{}
+  if err := json.Unmarshal(b2, &o2); err != nil {
+    return false
+  }
+
+  return reflect.DeepEqual(o1, o2)
+}
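jsonBytesEqual treats two byte slices as equal when they unmarshal to the same JSON value, so whitespace, key order and quoting style no longer register as drift. A short usage sketch (the helper is copied from the hunk above; the policy documents are illustrative):

package main

import (
    "encoding/json"
    "fmt"
    "reflect"
)

func jsonBytesEqual(b1, b2 []byte) bool {
    var o1 interface{}
    if err := json.Unmarshal(b1, &o1); err != nil {
        return false
    }

    var o2 interface{}
    if err := json.Unmarshal(b2, &o2); err != nil {
        return false
    }

    return reflect.DeepEqual(o1, o2)
}

func main() {
    a := []byte(`{"Version":"2012-10-17","Statement":[]}`)
    b := []byte("{\n  \"Statement\": [],\n  \"Version\": \"2012-10-17\"\n}")
    fmt.Println(jsonBytesEqual(a, b)) // true: only formatting and key order differ
}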
@@ -32,3 +32,41 @@ func TestLooksLikeJsonString(t *testing.T) {
     t.Errorf("Expected looksLikeJson to return false for %s", doesNotLookLikeJson)
   }
 }
+
+func TestJsonBytesEqualQuotedAndUnquoted(t *testing.T) {
+  unquoted := `{"test": "test"}`
+  quoted := "{\"test\": \"test\"}"
+
+  if !jsonBytesEqual([]byte(unquoted), []byte(quoted)) {
+    t.Errorf("Expected jsonBytesEqual to return true for %s == %s", unquoted, quoted)
+  }
+
+  unquotedDiff := `{"test": "test"}`
+  quotedDiff := "{\"test\": \"tested\"}"
+
+  if jsonBytesEqual([]byte(unquotedDiff), []byte(quotedDiff)) {
+    t.Errorf("Expected jsonBytesEqual to return false for %s == %s", unquotedDiff, quotedDiff)
+  }
+}
+
+func TestJsonBytesEqualWhitespaceAndNoWhitespace(t *testing.T) {
+  noWhitespace := `{"test":"test"}`
+  whitespace := `
+{
+  "test": "test"
+}`
+
+  if !jsonBytesEqual([]byte(noWhitespace), []byte(whitespace)) {
+    t.Errorf("Expected jsonBytesEqual to return true for %s == %s", noWhitespace, whitespace)
+  }
+
+  noWhitespaceDiff := `{"test":"test"}`
+  whitespaceDiff := `
+{
+  "test": "tested"
+}`
+
+  if jsonBytesEqual([]byte(noWhitespaceDiff), []byte(whitespaceDiff)) {
+    t.Errorf("Expected jsonBytesEqual to return false for %s == %s", noWhitespaceDiff, whitespaceDiff)
+  }
+}
@@ -930,3 +930,22 @@ func validateConfigExecutionFrequency(v interface{}, k string) (ws []string, err
       k, frequency, validFrequencies))
   return
 }
+
+func validateAccountAlias(v interface{}, k string) (ws []string, es []error) {
+  val := v.(string)
+
+  if (len(val) < 3) || (len(val) > 63) {
+    es = append(es, fmt.Errorf("%q must contain from 3 to 63 alphanumeric characters or hyphens", k))
+  }
+  if !regexp.MustCompile("^[a-z0-9][a-z0-9-]+$").MatchString(val) {
+    es = append(es, fmt.Errorf("%q must start with an alphanumeric character and only contain lowercase alphanumeric characters and hyphens", k))
+  }
+  if strings.Contains(val, "--") {
+    es = append(es, fmt.Errorf("%q must not contain consecutive hyphens", k))
+  }
+  if strings.HasSuffix(val, "-") {
+    es = append(es, fmt.Errorf("%q must not end in a hyphen", k))
+  }
+
+  return
+}
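validateAccountAlias has the provider's usual (ws []string, es []error) validator signature, so it can be attached to a string field through ValidateFunc. A hedged sketch of that wiring (the schema field and the sample value are illustrative; the validator body is copied from the hunk above):

package main

import (
    "fmt"
    "regexp"
    "strings"

    "github.com/hashicorp/terraform/helper/schema"
)

func validateAccountAlias(v interface{}, k string) (ws []string, es []error) {
    val := v.(string)
    if (len(val) < 3) || (len(val) > 63) {
        es = append(es, fmt.Errorf("%q must contain from 3 to 63 alphanumeric characters or hyphens", k))
    }
    if !regexp.MustCompile("^[a-z0-9][a-z0-9-]+$").MatchString(val) {
        es = append(es, fmt.Errorf("%q must start with an alphanumeric character and only contain lowercase alphanumeric characters and hyphens", k))
    }
    if strings.Contains(val, "--") {
        es = append(es, fmt.Errorf("%q must not contain consecutive hyphens", k))
    }
    if strings.HasSuffix(val, "-") {
        es = append(es, fmt.Errorf("%q must not end in a hyphen", k))
    }
    return
}

func main() {
    // Hypothetical field that only accepts a valid alias.
    s := &schema.Schema{
        Type:         schema.TypeString,
        Required:     true,
        ValidateFunc: validateAccountAlias,
    }
    _, errs := s.ValidateFunc("Bad--Alias-", "account_alias")
    fmt.Println(len(errs), "validation errors") // 3 validation errors
}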
@@ -1550,3 +1550,32 @@ func TestValidateDmsReplicationTaskId(t *testing.T) {
     }
   }
 }
+
+func TestValidateAccountAlias(t *testing.T) {
+  validAliases := []string{
+    "tf-alias",
+    "0tf-alias1",
+  }
+
+  for _, s := range validAliases {
+    _, errors := validateAccountAlias(s, "account_alias")
+    if len(errors) > 0 {
+      t.Fatalf("%q should be a valid account alias: %v", s, errors)
+    }
+  }
+
+  invalidAliases := []string{
+    "tf",
+    "-tf",
+    "tf-",
+    "TF-Alias",
+    "tf-alias-tf-alias-tf-alias-tf-alias-tf-alias-tf-alias-tf-alias-tf-alias",
+  }
+
+  for _, s := range invalidAliases {
+    _, errors := validateAccountAlias(s, "account_alias")
+    if len(errors) == 0 {
+      t.Fatalf("%q should not be a valid account alias: %v", s, errors)
+    }
+  }
+}
@@ -1,6 +1,9 @@
 package azurerm

 import (
+  "crypto/sha1"
+  "encoding/base64"
+  "encoding/hex"
   "fmt"
   "log"
   "reflect"
@@ -191,6 +194,12 @@ func providerConfigure(p *schema.Provider) schema.ConfigureFunc {

     client.StopContext = p.StopContext()

+    // replaces the context between tests
+    p.MetaReset = func() error {
+      client.StopContext = p.StopContext()
+      return nil
+    }
+
     // List all the available providers and their registration state to avoid unnecessary
     // requests. This also lets us check if the provider credentials are correct.
     providerList, err := client.providers.List(nil, "")
@@ -323,3 +332,31 @@ func ignoreCaseDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool
 func ignoreCaseStateFunc(val interface{}) string {
   return strings.ToLower(val.(string))
 }
+
+func userDataStateFunc(v interface{}) string {
+  switch s := v.(type) {
+  case string:
+    s = base64Encode(s)
+    hash := sha1.Sum([]byte(s))
+    return hex.EncodeToString(hash[:])
+  default:
+    return ""
+  }
+}
+
+// Base64Encode encodes data if the input isn't already encoded using
+// base64.StdEncoding.EncodeToString. If the input is already base64 encoded,
+// return the original input unchanged.
+func base64Encode(data string) string {
+  // Check whether the data is already Base64 encoded; don't double-encode
+  if isBase64Encoded(data) {
+    return data
+  }
+  // data has not been encoded encode and return
+  return base64.StdEncoding.EncodeToString([]byte(data))
+}
+
+func isBase64Encoded(data string) bool {
+  _, err := base64.StdEncoding.DecodeString(data)
+  return err == nil
+}
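base64Encode above exists to avoid double-encoding: input that already decodes as base64 is returned untouched, anything else is encoded once. A short stand-alone sketch of the same pair of helpers in use (the cloud-config payload is illustrative); one caveat worth keeping in mind is that a plain string which happens to be valid base64 will also be passed through unchanged:

package main

import (
    "encoding/base64"
    "fmt"
)

func isBase64Encoded(data string) bool {
    _, err := base64.StdEncoding.DecodeString(data)
    return err == nil
}

// Encode only when the input is not already valid base64, so feeding the
// function its own output never stacks another layer of encoding.
func base64Encode(data string) string {
    if isBase64Encoded(data) {
        return data
    }
    return base64.StdEncoding.EncodeToString([]byte(data))
}

func main() {
    raw := "#cloud-config\nruncmd:\n  - echo hello"
    once := base64Encode(raw)
    twice := base64Encode(once)
    fmt.Println(once == twice) // true: the second call is a no-op
}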