Merge remote-tracking branch 'upstream/master' into gcp_compute_disk_snapshot

commit 731fceaae5

.travis.yml (13 changes)
@@ -1,7 +1,20 @@
dist: trusty
sudo: false
language: go
go:
- 1.8

env:
- CONSUL_VERSION=0.7.5 TF_CONSUL_TEST=1 GOMAXPROCS=4

# Fetch consul for the backend and provider tests
before_install:
- curl -sLo consul.zip https://releases.hashicorp.com/consul/${CONSUL_VERSION}/consul_${CONSUL_VERSION}_linux_amd64.zip
- unzip consul.zip
- mkdir ~/bin
- mv consul ~/bin
- export PATH="~/bin:$PATH"

install:
# This script is used by the Travis build to install a cookie for
# go.googlesource.com so rate limits are higher when using `go get` to fetch
CHANGELOG.md (164 changes)

@@ -1,10 +1,166 @@
**TEMPORARY NOTE:** The "master" branch CHANGELOG also includes any changes
in the branch "0-8-stable". The "master" branch is currently a development
branch for the next major version of Terraform.

## 0.9.1 (unreleased)

## 0.9.0-beta3 (unreleased)

BACKWARDS INCOMPATIBILITIES / NOTES:

* provider/pagerduty: the deprecated `name_regex` field has been removed from vendor data source [GH-12396]

FEATURES:

* **New Provider:** `kubernetes` [GH-12372]
* **New Resource:** `kubernetes_namespace` [GH-12372]

BUG FIXES:

* provider/aws: Get the aws_lambda_function attributes when there are greater than 50 versions of a function [GH-11745]
* provider/google: Fix the Google provider asking for account_file input on every run [GH-12729]


## 0.9.0 (March 15, 2017)

**This is the complete 0.8.8 to 0.9 CHANGELOG. Below this section we also have a 0.9.0-beta2 to 0.9.0 final CHANGELOG.**

BACKWARDS INCOMPATIBILITIES / NOTES:

* provider/aws: `aws_codebuild_project` renamed `timeout` to `build_timeout` ([#12503](https://github.com/hashicorp/terraform/issues/12503))
* provider/azurerm: `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` now store the hash of custom_data, not all custom_data ([#12214](https://github.com/hashicorp/terraform/issues/12214))
* provider/azurerm: scale_sets `os_profile_master_password` now marked as sensitive
* provider/azurerm: sql_server `administrator_login_password` now marked as sensitive
* provider/dnsimple: Provider has been upgraded to APIv2; therefore, you will need to use the APIv2 auth token
* provider/google: storage buckets have been updated with the new storage classes. The old classes will continue working as before, but should be migrated as soon as possible, as there's no guarantee they'll continue working forever. ([#12044](https://github.com/hashicorp/terraform/issues/12044))
* provider/google: compute_instance, compute_instance_template, and compute_disk all have subtly changed logic when specifying an image family as the image; in 0.8.x they would pin to the latest image in the family when the resource is created; in 0.9.x they pass the family to the API and use its behaviour. New input formats are also supported. ([#12223](https://github.com/hashicorp/terraform/issues/12223))
* provider/google: removed the unused and deprecated region field from google_compute_backend_service ([#12663](https://github.com/hashicorp/terraform/issues/12663))
* provider/google: removed the deprecated account_file field for the Google Cloud provider ([#12668](https://github.com/hashicorp/terraform/issues/12668))
* provider/google: removed the deprecated fields from google_project ([#12659](https://github.com/hashicorp/terraform/issues/12659))

FEATURES:

* **Remote Backends:** This is a successor to "remote state" and includes
  file-based configuration, an improved setup process (just run `terraform init`),
  no more local caching of remote state, and more. ([#11286](https://github.com/hashicorp/terraform/issues/11286))
* **Destroy Provisioners:** Provisioners can now be configured to run
  on resource destruction. ([#11329](https://github.com/hashicorp/terraform/issues/11329))
* **State Locking:** State will be automatically locked when supported by the backend.
  Backends supporting locking in this release are Local, S3 (via DynamoDB), and Consul. ([#11187](https://github.com/hashicorp/terraform/issues/11187))
* **State Environments:** You can now create named "environments" for states. This allows you to manage distinct infrastructure resources from the same configuration.
* **New Provider:** `Circonus` ([#12578](https://github.com/hashicorp/terraform/issues/12578))
* **New Data Source:** `openstack_networking_network_v2` ([#12304](https://github.com/hashicorp/terraform/issues/12304))
* **New Resource:** `aws_iam_account_alias` ([#12648](https://github.com/hashicorp/terraform/issues/12648))
* **New Resource:** `datadog_downtime` ([#10994](https://github.com/hashicorp/terraform/issues/10994))
* **New Resource:** `ns1_notifylist` ([#12373](https://github.com/hashicorp/terraform/issues/12373))
* **New Resource:** `google_container_node_pool` ([#11802](https://github.com/hashicorp/terraform/issues/11802))
* **New Resource:** `rancher_certificate` ([#12717](https://github.com/hashicorp/terraform/issues/12717))
* **New Resource:** `rancher_host` ([#11545](https://github.com/hashicorp/terraform/issues/11545))
* helper/schema: Added Timeouts to allow Provider/Resource developers to expose configurable timeouts for actions ([#12311](https://github.com/hashicorp/terraform/issues/12311))

IMPROVEMENTS:

* core: Data source values can now be used as part of a `count` calculation. ([#11482](https://github.com/hashicorp/terraform/issues/11482))
* core: "terraformrc" can contain env var references with $FOO ([#11929](https://github.com/hashicorp/terraform/issues/11929))
* core: report all errors encountered during config validation ([#12383](https://github.com/hashicorp/terraform/issues/12383))
* command: CLI args can be specified via env vars. Specify `TF_CLI_ARGS` or `TF_CLI_ARGS_name` (where name is the name of a command) to specify additional CLI args ([#11922](https://github.com/hashicorp/terraform/issues/11922))
* command/init: previous behavior is retained, but init now also configures
  the new remote backends as well as downloads modules. It is the single
  command to initialize a new or existing Terraform configuration.
* command: Display resource state ID in refresh/plan/destroy output ([#12261](https://github.com/hashicorp/terraform/issues/12261))
* provider/aws: AWS Lambda DeadLetterConfig support ([#12188](https://github.com/hashicorp/terraform/issues/12188))
* provider/aws: Return errors from Elastic Beanstalk ([#12425](https://github.com/hashicorp/terraform/issues/12425))
* provider/aws: Set aws_db_cluster to snapshot by default ([#11668](https://github.com/hashicorp/terraform/issues/11668))
* provider/aws: Enable final snapshots for aws_rds_cluster by default ([#11694](https://github.com/hashicorp/terraform/issues/11694))
* provider/aws: Enable snapshotting by default on aws_redshift_cluster ([#11695](https://github.com/hashicorp/terraform/issues/11695))
* provider/aws: Add support for ACM certificates to `api_gateway_domain_name` ([#12592](https://github.com/hashicorp/terraform/issues/12592))
* provider/aws: Add support for IPv6 to aws\_security\_group\_rule ([#12645](https://github.com/hashicorp/terraform/issues/12645))
* provider/aws: Add IPv6 Support to aws\_route\_table ([#12640](https://github.com/hashicorp/terraform/issues/12640))
* provider/aws: Add support for IPv6 to aws\_network\_acl\_rule ([#12644](https://github.com/hashicorp/terraform/issues/12644))
* provider/aws: Add support for IPv6 to aws\_default\_route\_table ([#12642](https://github.com/hashicorp/terraform/issues/12642))
* provider/aws: Add support for IPv6 to aws\_network\_acl ([#12641](https://github.com/hashicorp/terraform/issues/12641))
* provider/aws: Add support for IPv6 in aws\_route ([#12639](https://github.com/hashicorp/terraform/issues/12639))
* provider/aws: Add support for IPv6 to aws\_security\_group ([#12655](https://github.com/hashicorp/terraform/issues/12655))
* provider/aws: Add replace\_unhealthy\_instances to spot\_fleet\_request ([#12681](https://github.com/hashicorp/terraform/issues/12681))
* provider/aws: Remove restriction on running aws\_opsworks\_* on us-east-1 ([#12688](https://github.com/hashicorp/terraform/issues/12688))
* provider/aws: Improve error message on S3 Bucket Object deletion ([#12712](https://github.com/hashicorp/terraform/issues/12712))
* provider/aws: Add log message about whether changes are being applied now or later ([#12691](https://github.com/hashicorp/terraform/issues/12691))
* provider/azurerm: Mark the azurerm_scale_set machine password as sensitive ([#11982](https://github.com/hashicorp/terraform/issues/11982))
* provider/azurerm: Mark the azurerm_sql_server admin password as sensitive ([#12004](https://github.com/hashicorp/terraform/issues/12004))
* provider/azurerm: Add support for managed availability sets. ([#12532](https://github.com/hashicorp/terraform/issues/12532))
* provider/azurerm: Add support for extensions on virtual machine scale sets ([#12124](https://github.com/hashicorp/terraform/issues/12124))
* provider/dnsimple: Upgrade DNSimple provider to API v2 ([#10760](https://github.com/hashicorp/terraform/issues/10760))
* provider/docker: added support for linux capabilities ([#12045](https://github.com/hashicorp/terraform/issues/12045))
* provider/fastly: Add Fastly SSL validation fields ([#12578](https://github.com/hashicorp/terraform/issues/12578))
* provider/ignition: Migrate all of the ignition resources to data sources ([#11851](https://github.com/hashicorp/terraform/issues/11851))
* provider/openstack: Set Availability Zone in Instances ([#12610](https://github.com/hashicorp/terraform/issues/12610))
* provider/openstack: Force Deletion of Instances ([#12689](https://github.com/hashicorp/terraform/issues/12689))
* provider/rancher: Better comparison of compose files ([#12561](https://github.com/hashicorp/terraform/issues/12561))
* provider/azurerm: store only hash of `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` custom_data - reduces size of state ([#12214](https://github.com/hashicorp/terraform/issues/12214))
* provider/vault: read vault token from `~/.vault-token` as a fallback for the
  `VAULT_TOKEN` environment variable. ([#11529](https://github.com/hashicorp/terraform/issues/11529))
* provisioners: All provisioners now respond very quickly to interrupts for
  fast cancellation. ([#10934](https://github.com/hashicorp/terraform/issues/10934))

BUG FIXES:

* core: targeting will remove untargeted providers ([#12050](https://github.com/hashicorp/terraform/issues/12050))
* core: doing a map lookup in a resource config with a computed set no longer crashes ([#12210](https://github.com/hashicorp/terraform/issues/12210))
* provider/aws: Fixes issue for aws_lb_ssl_negotiation_policy of already deleted ELB ([#12360](https://github.com/hashicorp/terraform/issues/12360))
* provider/aws: Populate the iam_instance_profile uniqueId ([#12449](https://github.com/hashicorp/terraform/issues/12449))
* provider/aws: Only send iops when creating io1 devices ([#12392](https://github.com/hashicorp/terraform/issues/12392))
* provider/aws: Fix spurious aws_spot_fleet_request diffs ([#12437](https://github.com/hashicorp/terraform/issues/12437))
* provider/aws: Changing volumes in ECS task definition should force new revision ([#11403](https://github.com/hashicorp/terraform/issues/11403))
* provider/aws: Ignore whitespace in json diff for aws_dms_replication_task options ([#12380](https://github.com/hashicorp/terraform/issues/12380))
* provider/aws: Check spot instance is running before trying to attach volumes ([#12459](https://github.com/hashicorp/terraform/issues/12459))
* provider/aws: Add the IPV6 cidr block to the vpc datasource ([#12529](https://github.com/hashicorp/terraform/issues/12529))
* provider/aws: Error on trying to recreate an existing customer gateway ([#12501](https://github.com/hashicorp/terraform/issues/12501))
* provider/aws: Prevent aws_dms_replication_task panic ([#12539](https://github.com/hashicorp/terraform/issues/12539))
* provider/aws: output the task definition name when errors occur during refresh ([#12609](https://github.com/hashicorp/terraform/issues/12609))
* provider/aws: Refresh iam saml provider from state on 404 ([#12602](https://github.com/hashicorp/terraform/issues/12602))
* provider/aws: Add address, port, hosted_zone_id and endpoint for aws_db_instance datasource ([#12623](https://github.com/hashicorp/terraform/issues/12623))
* provider/aws: Allow recreation of `aws_opsworks_user_profile` when the `user_arn` is changed ([#12595](https://github.com/hashicorp/terraform/issues/12595))
* provider/aws: Guard clause to prevent panic on ELB connectionSettings ([#12685](https://github.com/hashicorp/terraform/issues/12685))
* provider/azurerm: bug fix to prevent crashes during azurerm_container_service provisioning ([#12516](https://github.com/hashicorp/terraform/issues/12516))
* provider/cobbler: Fix Profile Repos ([#12452](https://github.com/hashicorp/terraform/issues/12452))
* provider/datadog: Update to datadog_monitor to use default values ([#12497](https://github.com/hashicorp/terraform/issues/12497))
* provider/datadog: Default notify_no_data on datadog_monitor to false ([#11903](https://github.com/hashicorp/terraform/issues/11903))
* provider/google: Correct the incorrect instance group manager URL returned from GKE ([#4336](https://github.com/hashicorp/terraform/issues/4336))
* provider/google: Fix a plan/apply cycle in IAM policies ([#12387](https://github.com/hashicorp/terraform/issues/12387))
* provider/google: Fix a plan/apply cycle in forwarding rules when only a single port is specified ([#12662](https://github.com/hashicorp/terraform/issues/12662))
* provider/google: Minor correction: "Deleting disk" message in Delete method ([#12521](https://github.com/hashicorp/terraform/issues/12521))
* provider/mysql: Avoid crash on un-interpolated provider cfg ([#12391](https://github.com/hashicorp/terraform/issues/12391))
* provider/ns1: Fix incorrect schema (causing crash) for 'ns1_user.notify' ([#12721](https://github.com/hashicorp/terraform/issues/12721))
* provider/openstack: Handle cases where volumes are disabled ([#12374](https://github.com/hashicorp/terraform/issues/12374))
* provider/openstack: Toggle Creation of Default Security Group Rules ([#12119](https://github.com/hashicorp/terraform/issues/12119))
* provider/openstack: Change Port fixed_ip to a Set ([#12613](https://github.com/hashicorp/terraform/issues/12613))
* provider/openstack: Add network_id to Network data source ([#12615](https://github.com/hashicorp/terraform/issues/12615))
* provider/openstack: Check for ErrDefault500 when creating/deleting pool member ([#12664](https://github.com/hashicorp/terraform/issues/12664))
* provider/rancher: Apply the set value for finish_upgrade to prevent recurring plans ([#12545](https://github.com/hashicorp/terraform/issues/12545))
* provider/scaleway: work around API concurrency issue ([#12707](https://github.com/hashicorp/terraform/issues/12707))
* provider/statuscake: use default status code list when updating test ([#12375](https://github.com/hashicorp/terraform/issues/12375))

## 0.9.0 from 0.9.0-beta2 (March 15, 2017)

**This only includes changes from 0.9.0-beta2 to 0.9.0 final. The section above has the complete 0.8.x to 0.9.0 CHANGELOG.**

FEATURES:

* **New Provider:** `Circonus` ([#12578](https://github.com/hashicorp/terraform/issues/12578))

BACKWARDS INCOMPATIBILITIES / NOTES:

* provider/aws: `aws_codebuild_project` renamed `timeout` to `build_timeout` ([#12503](https://github.com/hashicorp/terraform/issues/12503))
* provider/azurerm: `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` now store the hash of custom_data, not all custom_data ([#12214](https://github.com/hashicorp/terraform/issues/12214))
* provider/google: compute_instance, compute_instance_template, and compute_disk all have subtly changed logic when specifying an image family as the image; in 0.8.x they would pin to the latest image in the family when the resource is created; in 0.9.x they pass the family to the API and use its behaviour. New input formats are also supported. ([#12223](https://github.com/hashicorp/terraform/issues/12223))
* provider/google: removed the unused and deprecated region field from google_compute_backend_service ([#12663](https://github.com/hashicorp/terraform/issues/12663))
* provider/google: removed the deprecated account_file field for the Google Cloud provider ([#12668](https://github.com/hashicorp/terraform/issues/12668))
* provider/google: removed the deprecated fields from google_project ([#12659](https://github.com/hashicorp/terraform/issues/12659))

IMPROVEMENTS:

* provider/azurerm: store only hash of `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` custom_data - reduces size of state ([#12214](https://github.com/hashicorp/terraform/issues/12214))
* report all errors encountered during config validation ([#12383](https://github.com/hashicorp/terraform/issues/12383))

BUG FIXES:

* provider/google: Correct the incorrect instance group manager URL returned from GKE ([#4336](https://github.com/hashicorp/terraform/issues/4336))
* provider/google: Fix a plan/apply cycle in IAM policies ([#12387](https://github.com/hashicorp/terraform/issues/12387))
* provider/google: Fix a plan/apply cycle in forwarding rules when only a single port is specified ([#12662](https://github.com/hashicorp/terraform/issues/12662))

## 0.9.0-beta2 (March 2, 2017)
Makefile (2 changes)

@@ -38,7 +38,7 @@ plugin-dev: generate
	mv $(GOPATH)/bin/$(PLUGIN) $(GOPATH)/bin/terraform-$(PLUGIN)

# test runs the unit tests
test:# fmtcheck errcheck generate
test: fmtcheck errcheck generate
	go test -i $(TEST) || exit 1
	echo $(TEST) | \
		xargs -t -n4 go test $(TESTARGS) -timeout=30s -parallel=4
@@ -1,7 +1,7 @@
Terraform
=========

- Website: http://www.terraform.io
- Website: https://www.terraform.io
- [![Gitter chat](https://badges.gitter.im/hashicorp-terraform/Lobby.png)](https://gitter.im/hashicorp-terraform/Lobby)
- Mailing list: [Google Groups](http://groups.google.com/group/terraform-tool)

@@ -29,7 +29,7 @@ All documentation is available on the [Terraform website](http://www.terraform.i
Developing Terraform
--------------------

If you wish to work on Terraform itself or any of its built-in providers, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.7+ is *required*). Alternatively, you can use the Vagrantfile in the root of this repo to stand up a virtual machine with the appropriate dev tooling already set up for you.
If you wish to work on Terraform itself or any of its built-in providers, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.8+ is *required*). Alternatively, you can use the Vagrantfile in the root of this repo to stand up a virtual machine with the appropriate dev tooling already set up for you.

For local dev first make sure Go is properly installed, including setting up a [GOPATH](http://golang.org/doc/code.html#GOPATH). You will also need to add `$GOPATH/bin` to your `$PATH`.
@ -127,7 +127,7 @@ func (b *Local) States() ([]string, error) {
|
|||
// the listing always start with "default"
|
||||
envs := []string{backend.DefaultStateName}
|
||||
|
||||
entries, err := ioutil.ReadDir(DefaultEnvDir)
|
||||
entries, err := ioutil.ReadDir(b.stateEnvDir())
|
||||
// no error if there's no envs configured
|
||||
if os.IsNotExist(err) {
|
||||
return envs, nil
|
||||
|
@ -166,7 +166,7 @@ func (b *Local) DeleteState(name string) error {
|
|||
}
|
||||
|
||||
delete(b.states, name)
|
||||
return os.RemoveAll(filepath.Join(DefaultEnvDir, name))
|
||||
return os.RemoveAll(filepath.Join(b.stateEnvDir(), name))
|
||||
}
|
||||
|
||||
func (b *Local) State(name string) (state.State, error) {
|
||||
|
@ -320,17 +320,12 @@ func (b *Local) StatePaths(name string) (string, string, string) {
|
|||
name = backend.DefaultStateName
|
||||
}
|
||||
|
||||
envDir := DefaultEnvDir
|
||||
if b.StateEnvDir != "" {
|
||||
envDir = b.StateEnvDir
|
||||
}
|
||||
|
||||
if name == backend.DefaultStateName {
|
||||
if statePath == "" {
|
||||
statePath = DefaultStateFilename
|
||||
}
|
||||
} else {
|
||||
statePath = filepath.Join(envDir, name, DefaultStateFilename)
|
||||
statePath = filepath.Join(b.stateEnvDir(), name, DefaultStateFilename)
|
||||
}
|
||||
|
||||
if stateOutPath == "" {
|
||||
|
@ -353,12 +348,7 @@ func (b *Local) createState(name string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
envDir := DefaultEnvDir
|
||||
if b.StateEnvDir != "" {
|
||||
envDir = b.StateEnvDir
|
||||
}
|
||||
|
||||
stateDir := filepath.Join(envDir, name)
|
||||
stateDir := filepath.Join(b.stateEnvDir(), name)
|
||||
s, err := os.Stat(stateDir)
|
||||
if err == nil && s.IsDir() {
|
||||
// no need to check for os.IsNotExist, since that is covered by os.MkdirAll
|
||||
|
@ -374,6 +364,15 @@ func (b *Local) createState(name string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// stateEnvDir returns the directory where state environments are stored.
|
||||
func (b *Local) stateEnvDir() string {
|
||||
if b.StateEnvDir != "" {
|
||||
return b.StateEnvDir
|
||||
}
|
||||
|
||||
return DefaultEnvDir
|
||||
}
|
||||
|
||||
// currentStateName returns the name of the current named state as set in the
|
||||
// configuration files.
|
||||
// If there are no configured environments, currentStateName returns "default"
|
||||
|
|
|
@ -20,6 +20,11 @@ func TestLocal_impl(t *testing.T) {
|
|||
var _ backend.CLI = new(Local)
|
||||
}
|
||||
|
||||
func TestLocal_backend(t *testing.T) {
|
||||
b := TestLocal(t)
|
||||
backend.TestBackend(t, b, b)
|
||||
}
|
||||
|
||||
func checkState(t *testing.T, path, expected string) {
|
||||
// Read the state
|
||||
f, err := os.Open(path)
|
||||
|
|
|
@ -21,6 +21,7 @@ func TestLocal(t *testing.T) *Local {
|
|||
StatePath: filepath.Join(tempDir, "state.tfstate"),
|
||||
StateOutPath: filepath.Join(tempDir, "state.tfstate"),
|
||||
StateBackupPath: filepath.Join(tempDir, "state.tfstate.bak"),
|
||||
StateEnvDir: filepath.Join(tempDir, "state.tfstate.d"),
|
||||
ContextOpts: &terraform.ContextOpts{},
|
||||
}
|
||||
}
|
||||
|
|
|
@ -53,6 +53,20 @@ func New() backend.Backend {
|
|||
Description: "HTTP Auth in the format of 'username:password'",
|
||||
Default: "", // To prevent input
|
||||
},
|
||||
|
||||
"gzip": &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Description: "Compress the state data using gzip",
|
||||
Default: false,
|
||||
},
|
||||
|
||||
"lock": &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Description: "Lock state access",
|
||||
Default: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -64,13 +78,18 @@ func New() backend.Backend {
|
|||
type Backend struct {
|
||||
*schema.Backend
|
||||
|
||||
// The fields below are set from configure
|
||||
configData *schema.ResourceData
|
||||
lock bool
|
||||
}
|
||||
|
||||
func (b *Backend) configure(ctx context.Context) error {
|
||||
// Grab the resource data
|
||||
b.configData = schema.FromContextBackendConfig(ctx)
|
||||
|
||||
// Store the lock information
|
||||
b.lock = b.configData.Get("lock").(bool)
|
||||
|
||||
// Initialize a client to test config
|
||||
_, err := b.clientRaw()
|
||||
return err
|
||||
|
|
|
@ -85,27 +85,39 @@ func (b *Backend) State(name string) (state.State, error) {
|
|||
// Determine the path of the data
|
||||
path := b.path(name)
|
||||
|
||||
// Determine whether to gzip or not
|
||||
gzip := b.configData.Get("gzip").(bool)
|
||||
|
||||
// Build the state client
|
||||
stateMgr := &remote.State{
|
||||
var stateMgr state.State = &remote.State{
|
||||
Client: &RemoteClient{
|
||||
Client: client,
|
||||
Path: path,
|
||||
GZip: gzip,
|
||||
},
|
||||
}
|
||||
|
||||
// If we're not locking, disable it
|
||||
if !b.lock {
|
||||
stateMgr = &state.LockDisabled{Inner: stateMgr}
|
||||
}
|
||||
|
||||
// Get the locker, which we know always exists
|
||||
stateMgrLocker := stateMgr.(state.Locker)
|
||||
|
||||
// Grab a lock, we use this to write an empty state if one doesn't
|
||||
// exist already. We have to write an empty state as a sentinel value
|
||||
// so States() knows it exists.
|
||||
lockInfo := state.NewLockInfo()
|
||||
lockInfo.Operation = "init"
|
||||
lockId, err := stateMgr.Lock(lockInfo)
|
||||
lockId, err := stateMgrLocker.Lock(lockInfo)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to lock state in Consul: %s", err)
|
||||
}
|
||||
|
||||
// Local helper function so we can call it multiple places
|
||||
lockUnlock := func(parent error) error {
|
||||
if err := stateMgr.Unlock(lockId); err != nil {
|
||||
if err := stateMgrLocker.Unlock(lockId); err != nil {
|
||||
return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err)
|
||||
}
|
||||
|
||||
|
|
|
@ -2,10 +2,12 @@ package consul
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/testutil"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
)
|
||||
|
||||
|
@ -13,19 +15,80 @@ func TestBackend_impl(t *testing.T) {
|
|||
var _ backend.Backend = new(Backend)
|
||||
}
|
||||
|
||||
func TestBackend(t *testing.T) {
|
||||
addr := os.Getenv("CONSUL_HTTP_ADDR")
|
||||
if addr == "" {
|
||||
t.Log("consul tests require CONSUL_HTTP_ADDR")
|
||||
func newConsulTestServer(t *testing.T) *testutil.TestServer {
|
||||
skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_CONSUL_TEST") == ""
|
||||
if skip {
|
||||
t.Log("consul server tests require setting TF_ACC or TF_CONSUL_TEST")
|
||||
t.Skip()
|
||||
}
|
||||
|
||||
// Get the backend
|
||||
b := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": addr,
|
||||
"path": fmt.Sprintf("tf-unit/%s", time.Now().String()),
|
||||
srv := testutil.NewTestServerConfig(t, func(c *testutil.TestServerConfig) {
|
||||
c.LogLevel = "warn"
|
||||
|
||||
if !testing.Verbose() {
|
||||
c.Stdout = ioutil.Discard
|
||||
c.Stderr = ioutil.Discard
|
||||
}
|
||||
})
|
||||
|
||||
return srv
|
||||
}
|
||||
|
||||
func TestBackend(t *testing.T) {
|
||||
srv := newConsulTestServer(t)
|
||||
defer srv.Stop()
|
||||
|
||||
path := fmt.Sprintf("tf-unit/%s", time.Now().String())
|
||||
|
||||
// Get the backend. We need two to test locking.
|
||||
b1 := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": path,
|
||||
})
|
||||
|
||||
b2 := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": path,
|
||||
})
|
||||
|
||||
// Test
|
||||
backend.TestBackend(t, b)
|
||||
backend.TestBackend(t, b1, b2)
|
||||
}
|
||||
|
||||
func TestBackend_lockDisabled(t *testing.T) {
|
||||
srv := newConsulTestServer(t)
|
||||
defer srv.Stop()
|
||||
|
||||
path := fmt.Sprintf("tf-unit/%s", time.Now().String())
|
||||
|
||||
// Get the backend. We need two to test locking.
|
||||
b1 := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": path,
|
||||
"lock": false,
|
||||
})
|
||||
|
||||
b2 := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": path + "different", // Diff so locking test would fail if it was locking
|
||||
"lock": false,
|
||||
})
|
||||
|
||||
// Test
|
||||
backend.TestBackend(t, b1, b2)
|
||||
}
|
||||
|
||||
func TestBackend_gzip(t *testing.T) {
|
||||
srv := newConsulTestServer(t)
|
||||
defer srv.Stop()
|
||||
|
||||
// Get the backend
|
||||
b := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": fmt.Sprintf("tf-unit/%s", time.Now().String()),
|
||||
"gzip": true,
|
||||
})
|
||||
|
||||
// Test
|
||||
backend.TestBackend(t, b, nil)
|
||||
}
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
package consul
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/md5"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
@ -22,6 +24,7 @@ const (
|
|||
type RemoteClient struct {
|
||||
Client *consulapi.Client
|
||||
Path string
|
||||
GZip bool
|
||||
|
||||
consulLock *consulapi.Lock
|
||||
lockCh <-chan struct{}
|
||||
|
@ -36,18 +39,37 @@ func (c *RemoteClient) Get() (*remote.Payload, error) {
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
payload := pair.Value
|
||||
// If the payload starts with 0x1f, it's gzip, not json
|
||||
if len(pair.Value) >= 1 && pair.Value[0] == '\x1f' {
|
||||
if data, err := uncompressState(pair.Value); err == nil {
|
||||
payload = data
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
md5 := md5.Sum(pair.Value)
|
||||
return &remote.Payload{
|
||||
Data: pair.Value,
|
||||
Data: payload,
|
||||
MD5: md5[:],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *RemoteClient) Put(data []byte) error {
|
||||
payload := data
|
||||
if c.GZip {
|
||||
if compressedState, err := compressState(data); err == nil {
|
||||
payload = compressedState
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
kv := c.Client.KV()
|
||||
_, err := kv.Put(&consulapi.KVPair{
|
||||
Key: c.Path,
|
||||
Value: data,
|
||||
Value: payload,
|
||||
}, nil)
|
||||
return err
|
||||
}
|
||||
|
@@ -177,3 +199,31 @@ func (c *RemoteClient) Unlock(id string) error {

	return err
}

func compressState(data []byte) ([]byte, error) {
	b := new(bytes.Buffer)
	gz := gzip.NewWriter(b)
	if _, err := gz.Write(data); err != nil {
		return nil, err
	}
	if err := gz.Flush(); err != nil {
		return nil, err
	}
	if err := gz.Close(); err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}

func uncompressState(data []byte) ([]byte, error) {
	b := new(bytes.Buffer)
	gz, err := gzip.NewReader(bytes.NewReader(data))
	if err != nil {
		return nil, err
	}
	b.ReadFrom(gz)
	if err := gz.Close(); err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}
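The `compressState`/`uncompressState` pair above, together with the 0x1f check in `Get`, is the whole gzip story for this backend: `Put` may write either raw JSON or gzip data, and `Get` sniffs the first byte to decide whether to decompress. A minimal, self-contained sketch of that round trip (illustration only, not backend code; the function names below are made up for the example):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
)

// compress mirrors the shape of compressState: gzip the raw state bytes.
func compress(data []byte) ([]byte, error) {
	var buf bytes.Buffer
	gz := gzip.NewWriter(&buf)
	if _, err := gz.Write(data); err != nil {
		return nil, err
	}
	if err := gz.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// maybeUncompress mirrors the Get path: gzip streams start with 0x1f,
// JSON state starts with '{', so the first byte tells them apart.
func maybeUncompress(payload []byte) ([]byte, error) {
	if len(payload) == 0 || payload[0] != 0x1f {
		return payload, nil
	}
	gz, err := gzip.NewReader(bytes.NewReader(payload))
	if err != nil {
		return nil, err
	}
	defer gz.Close()
	return ioutil.ReadAll(gz)
}

func main() {
	state := []byte(`{"version": 3, "serial": 1}`)
	packed, _ := compress(state)
	unpacked, _ := maybeUncompress(packed)
	fmt.Println(bytes.Equal(state, unpacked)) // true
}
```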
@ -2,7 +2,6 @@ package consul
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
@ -16,15 +15,12 @@ func TestRemoteClient_impl(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRemoteClient(t *testing.T) {
|
||||
addr := os.Getenv("CONSUL_HTTP_ADDR")
|
||||
if addr == "" {
|
||||
t.Log("consul tests require CONSUL_HTTP_ADDR")
|
||||
t.Skip()
|
||||
}
|
||||
srv := newConsulTestServer(t)
|
||||
defer srv.Stop()
|
||||
|
||||
// Get the backend
|
||||
b := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": addr,
|
||||
"address": srv.HTTPAddr,
|
||||
"path": fmt.Sprintf("tf-unit/%s", time.Now().String()),
|
||||
})
|
||||
|
||||
|
@ -38,18 +34,54 @@ func TestRemoteClient(t *testing.T) {
|
|||
remote.TestClient(t, state.(*remote.State).Client)
|
||||
}
|
||||
|
||||
func TestConsul_stateLock(t *testing.T) {
|
||||
addr := os.Getenv("CONSUL_HTTP_ADDR")
|
||||
if addr == "" {
|
||||
t.Log("consul lock tests require CONSUL_HTTP_ADDR")
|
||||
t.Skip()
|
||||
// test the gzip functionality of the client
|
||||
func TestRemoteClient_gzipUpgrade(t *testing.T) {
|
||||
srv := newConsulTestServer(t)
|
||||
defer srv.Stop()
|
||||
|
||||
statePath := fmt.Sprintf("tf-unit/%s", time.Now().String())
|
||||
|
||||
// Get the backend
|
||||
b := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": statePath,
|
||||
})
|
||||
|
||||
// Grab the client
|
||||
state, err := b.State(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
// Test
|
||||
remote.TestClient(t, state.(*remote.State).Client)
|
||||
|
||||
// create a new backend with gzip
|
||||
b = backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": statePath,
|
||||
"gzip": true,
|
||||
})
|
||||
|
||||
// Grab the client
|
||||
state, err = b.State(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
// Test
|
||||
remote.TestClient(t, state.(*remote.State).Client)
|
||||
}
|
||||
|
||||
func TestConsul_stateLock(t *testing.T) {
|
||||
srv := newConsulTestServer(t)
|
||||
defer srv.Stop()
|
||||
|
||||
path := fmt.Sprintf("tf-unit/%s", time.Now().String())
|
||||
|
||||
// create 2 instances to get 2 remote.Clients
|
||||
sA, err := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": addr,
|
||||
"address": srv.HTTPAddr,
|
||||
"path": path,
|
||||
}).State(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
|
@ -57,7 +89,7 @@ func TestConsul_stateLock(t *testing.T) {
|
|||
}
|
||||
|
||||
sB, err := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": addr,
|
||||
"address": srv.HTTPAddr,
|
||||
"path": path,
|
||||
}).State(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/config"
|
||||
"github.com/hashicorp/terraform/state"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
|
@@ -40,8 +41,15 @@ func TestBackendConfig(t *testing.T, b Backend, c map[string]interface{}) Backend
// assumed to already be configured. This will test state functionality.
// If the backend reports it doesn't support multi-state by returning the
// error ErrNamedStatesNotSupported, then it will not test that.
func TestBackend(t *testing.T, b Backend) {
	testBackendStates(t, b)
//
// If you want to test locking, two backends must be given. If b2 is nil,
// then state locking won't be tested.
func TestBackend(t *testing.T, b1, b2 Backend) {
	testBackendStates(t, b1)

	if b2 != nil {
		testBackendStateLock(t, b1, b2)
	}
}

func testBackendStates(t *testing.T, b Backend) {
@ -82,6 +90,10 @@ func testBackendStates(t *testing.T, b Backend) {
|
|||
// Verify they are distinct states
|
||||
{
|
||||
s := barState.State()
|
||||
if s == nil {
|
||||
s = terraform.NewState()
|
||||
}
|
||||
|
||||
s.Lineage = "bar"
|
||||
if err := barState.WriteState(s); err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
|
@ -93,7 +105,7 @@ func testBackendStates(t *testing.T, b Backend) {
|
|||
if err := fooState.RefreshState(); err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
if v := fooState.State(); v.Lineage == "bar" {
|
||||
if v := fooState.State(); v != nil && v.Lineage == "bar" {
|
||||
t.Fatalf("bad: %#v", v)
|
||||
}
|
||||
}
|
||||
|
@ -138,3 +150,77 @@ func testBackendStates(t *testing.T, b Backend) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testBackendStateLock(t *testing.T, b1, b2 Backend) {
|
||||
// Get the default state for each
|
||||
b1StateMgr, err := b1.State(DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatalf("error: %s", err)
|
||||
}
|
||||
if err := b1StateMgr.RefreshState(); err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
|
||||
// Fast exit if this doesn't support locking at all
|
||||
if _, ok := b1StateMgr.(state.Locker); !ok {
|
||||
t.Logf("TestBackend: backend %T doesn't support state locking, not testing", b1)
|
||||
return
|
||||
}
|
||||
|
||||
t.Logf("TestBackend: testing state locking for %T", b1)
|
||||
|
||||
b2StateMgr, err := b2.State(DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatalf("error: %s", err)
|
||||
}
|
||||
if err := b2StateMgr.RefreshState(); err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
|
||||
// Reassign so it's obvious what's happening
|
||||
lockerA := b1StateMgr.(state.Locker)
|
||||
lockerB := b2StateMgr.(state.Locker)
|
||||
|
||||
infoA := state.NewLockInfo()
|
||||
infoA.Operation = "test"
|
||||
infoA.Who = "clientA"
|
||||
|
||||
infoB := state.NewLockInfo()
|
||||
infoB.Operation = "test"
|
||||
infoB.Who = "clientB"
|
||||
|
||||
lockIDA, err := lockerA.Lock(infoA)
|
||||
if err != nil {
|
||||
t.Fatal("unable to get initial lock:", err)
|
||||
}
|
||||
|
||||
// If the lock ID is blank, assume locking is disabled
|
||||
if lockIDA == "" {
|
||||
t.Logf("TestBackend: %T: empty string returned for lock, assuming disabled", b1)
|
||||
return
|
||||
}
|
||||
|
||||
_, err = lockerB.Lock(infoB)
|
||||
if err == nil {
|
||||
lockerA.Unlock(lockIDA)
|
||||
t.Fatal("client B obtained lock while held by client A")
|
||||
}
|
||||
|
||||
if err := lockerA.Unlock(lockIDA); err != nil {
|
||||
t.Fatal("error unlocking client A", err)
|
||||
}
|
||||
|
||||
lockIDB, err := lockerB.Lock(infoB)
|
||||
if err != nil {
|
||||
t.Fatal("unable to obtain lock from client B")
|
||||
}
|
||||
|
||||
if lockIDB == lockIDA {
|
||||
t.Fatalf("duplicate lock IDs: %q", lockIDB)
|
||||
}
|
||||
|
||||
if err = lockerB.Unlock(lockIDB); err != nil {
|
||||
t.Fatal("error unlocking client B:", err)
|
||||
}
|
||||
|
||||
}
|
||||
|
|
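The helper above pins down the locking contract that backends such as Local and Consul now implement: `Lock` returns an ID, a second `Lock` must fail while the first is held, and `Unlock` takes that ID back. A hedged sketch of how a caller would typically use the contract (`withLockedState` is an illustrative helper, not part of this commit):

```go
package example

import (
	"fmt"

	"github.com/hashicorp/terraform/backend"
	"github.com/hashicorp/terraform/state"
)

// withLockedState is a sketch only: given an already-configured backend b,
// it locks the named state if the backend supports locking, does some work,
// and releases the lock when done.
func withLockedState(b backend.Backend, name string) error {
	s, err := b.State(name)
	if err != nil {
		return err
	}

	// Not every state manager supports locking; check for state.Locker first.
	if locker, ok := s.(state.Locker); ok {
		info := state.NewLockInfo()
		info.Operation = "example"

		id, err := locker.Lock(info)
		if err != nil {
			return fmt.Errorf("failed to lock state: %s", err)
		}
		// Always unlock with the ID that Lock returned.
		defer locker.Unlock(id)
	}

	// Read or write state while (possibly) holding the lock.
	return s.RefreshState()
}
```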
|
@@ -308,7 +308,7 @@ func (c *Config) Client() (interface{}, error) {
	client.kmsconn = kms.New(sess)
	client.lambdaconn = lambda.New(sess)
	client.lightsailconn = lightsail.New(usEast1Sess)
	client.opsworksconn = opsworks.New(usEast1Sess)
	client.opsworksconn = opsworks.New(sess)
	client.r53conn = route53.New(usEast1Sess)
	client.rdsconn = rds.New(sess)
	client.redshiftconn = redshift.New(sess)
@ -20,6 +20,11 @@ func dataSourceAwsDbInstance() *schema.Resource {
|
|||
ForceNew: true,
|
||||
},
|
||||
|
||||
"address": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"allocated_storage": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
|
@ -82,6 +87,11 @@ func dataSourceAwsDbInstance() *schema.Resource {
|
|||
Computed: true,
|
||||
},
|
||||
|
||||
"endpoint": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"engine": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
|
@ -92,6 +102,11 @@ func dataSourceAwsDbInstance() *schema.Resource {
|
|||
Computed: true,
|
||||
},
|
||||
|
||||
"hosted_zone_id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"iops": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
|
@ -133,6 +148,11 @@ func dataSourceAwsDbInstance() *schema.Resource {
|
|||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
|
||||
"port": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"preferred_backup_window": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
|
@ -232,6 +252,10 @@ func dataSourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error
|
|||
d.Set("master_username", dbInstance.MasterUsername)
|
||||
d.Set("monitoring_interval", dbInstance.MonitoringInterval)
|
||||
d.Set("monitoring_role_arn", dbInstance.MonitoringRoleArn)
|
||||
d.Set("address", dbInstance.Endpoint.Address)
|
||||
d.Set("port", dbInstance.Endpoint.Port)
|
||||
d.Set("hosted_zone_id", dbInstance.Endpoint.HostedZoneId)
|
||||
d.Set("endpoint", fmt.Sprintf("%s:%d", *dbInstance.Endpoint.Address, *dbInstance.Endpoint.Port))
|
||||
|
||||
var optionGroups []string
|
||||
for _, v := range dbInstance.OptionGroupMemberships {
|
||||
|
|
|
@ -28,6 +28,25 @@ func TestAccAWSDataDbInstance_basic(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestAccAWSDataDbInstance_endpoint(t *testing.T) {
|
||||
rInt := acctest.RandInt()
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSDBInstanceConfigWithDataSource(rInt),
|
||||
Check: resource.ComposeAggregateTestCheckFunc(
|
||||
resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "address"),
|
||||
resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "port"),
|
||||
resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "hosted_zone_id"),
|
||||
resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "endpoint"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccAWSDBInstanceConfigWithDataSource(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_db_instance" "bar" {
|
||||
|
|
|
@@ -51,7 +51,7 @@ func dataSourceAwsEcsTaskDefinitionRead(d *schema.ResourceData, meta interface{}
	})

	if err != nil {
		return err
		return fmt.Errorf("Failed getting task definition %s %q", err, d.Get("task_definition").(string))
	}

	taskDefinition := *desc.TaskDefinition
@@ -335,7 +335,6 @@ resource "aws_instance" "foo" {
	root_block_device {
		volume_type = "gp2"
		volume_size = 11
		iops = 330
	}
}
@ -41,6 +41,16 @@ func dataSourceAwsRouteTable() *schema.Resource {
|
|||
Computed: true,
|
||||
},
|
||||
|
||||
"ipv6_cidr_block": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"egress_only_gateway_id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"gateway_id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
|
@ -177,6 +187,12 @@ func dataSourceRoutesRead(ec2Routes []*ec2.Route) []map[string]interface{} {
|
|||
if r.DestinationCidrBlock != nil {
|
||||
m["cidr_block"] = *r.DestinationCidrBlock
|
||||
}
|
||||
if r.DestinationIpv6CidrBlock != nil {
|
||||
m["ipv6_cidr_block"] = *r.DestinationIpv6CidrBlock
|
||||
}
|
||||
if r.EgressOnlyInternetGatewayId != nil {
|
||||
m["egress_only_gateway_id"] = *r.EgressOnlyInternetGatewayId
|
||||
}
|
||||
if r.GatewayId != nil {
|
||||
m["gateway_id"] = *r.GatewayId
|
||||
}
|
||||
|
|
|
@ -14,7 +14,7 @@ func TestAccDataSourceAwsRouteTable_basic(t *testing.T) {
|
|||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccDataSourceAwsRouteTableGroupConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccDataSourceAwsRouteTableCheck("data.aws_route_table.by_tag"),
|
||||
|
@ -33,7 +33,7 @@ func TestAccDataSourceAwsRouteTable_main(t *testing.T) {
|
|||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccDataSourceAwsRouteTableMainRoute,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccDataSourceAwsRouteTableCheckMain("data.aws_route_table.by_filter"),
|
||||
|
|
|
@ -14,19 +14,19 @@ func dataSourceAwsVpc() *schema.Resource {
|
|||
Read: dataSourceAwsVpcRead,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"cidr_block": &schema.Schema{
|
||||
"cidr_block": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"dhcp_options_id": &schema.Schema{
|
||||
"dhcp_options_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"default": &schema.Schema{
|
||||
"default": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
|
@ -34,18 +34,28 @@ func dataSourceAwsVpc() *schema.Resource {
|
|||
|
||||
"filter": ec2CustomFiltersSchema(),
|
||||
|
||||
"id": &schema.Schema{
|
||||
"id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"instance_tenancy": &schema.Schema{
|
||||
"instance_tenancy": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"state": &schema.Schema{
|
||||
"ipv6_cidr_block": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"ipv6_association_id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"state": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
|
@ -117,5 +127,10 @@ func dataSourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error {
|
|||
d.Set("state", vpc.State)
|
||||
d.Set("tags", tagsToMap(vpc.Tags))
|
||||
|
||||
if vpc.Ipv6CidrBlockAssociationSet != nil {
|
||||
d.Set("ipv6_association_id", vpc.Ipv6CidrBlockAssociationSet[0].AssociationId)
|
||||
d.Set("ipv6_cidr_block", vpc.Ipv6CidrBlockAssociationSet[0].Ipv6CidrBlock)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -2,31 +2,60 @@ package aws
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccDataSourceAwsVpc_basic(t *testing.T) {
|
||||
rand.Seed(time.Now().UTC().UnixNano())
|
||||
rInt := rand.Intn(16)
|
||||
cidr := fmt.Sprintf("172.%d.0.0/16", rInt)
|
||||
tag := fmt.Sprintf("terraform-testacc-vpc-data-source-%d", rInt)
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccDataSourceAwsVpcConfig,
|
||||
{
|
||||
Config: testAccDataSourceAwsVpcConfig(cidr, tag),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_id"),
|
||||
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_cidr"),
|
||||
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_tag"),
|
||||
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_filter"),
|
||||
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_id", cidr, tag),
|
||||
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_cidr", cidr, tag),
|
||||
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_tag", cidr, tag),
|
||||
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_filter", cidr, tag),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccDataSourceAwsVpcCheck(name string) resource.TestCheckFunc {
|
||||
func TestAccDataSourceAwsVpc_ipv6Associated(t *testing.T) {
|
||||
rand.Seed(time.Now().UTC().UnixNano())
|
||||
rInt := rand.Intn(16)
|
||||
cidr := fmt.Sprintf("172.%d.0.0/16", rInt)
|
||||
tag := fmt.Sprintf("terraform-testacc-vpc-data-source-%d", rInt)
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccDataSourceAwsVpcConfigIpv6(cidr, tag),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_id", cidr, tag),
|
||||
resource.TestCheckResourceAttrSet(
|
||||
"data.aws_vpc.by_id", "ipv6_association_id"),
|
||||
resource.TestCheckResourceAttrSet(
|
||||
"data.aws_vpc.by_id", "ipv6_cidr_block"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccDataSourceAwsVpcCheck(name, cidr, tag string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[name]
|
||||
if !ok {
|
||||
|
@ -48,10 +77,10 @@ func testAccDataSourceAwsVpcCheck(name string) resource.TestCheckFunc {
|
|||
)
|
||||
}
|
||||
|
||||
if attr["cidr_block"] != "172.16.0.0/16" {
|
||||
return fmt.Errorf("bad cidr_block %s", attr["cidr_block"])
|
||||
if attr["cidr_block"] != cidr {
|
||||
return fmt.Errorf("bad cidr_block %s, expected: %s", attr["cidr_block"], cidr)
|
||||
}
|
||||
if attr["tags.Name"] != "terraform-testacc-vpc-data-source" {
|
||||
if attr["tags.Name"] != tag {
|
||||
return fmt.Errorf("bad Name tag %s", attr["tags.Name"])
|
||||
}
|
||||
|
||||
|
@ -59,16 +88,37 @@ func testAccDataSourceAwsVpcCheck(name string) resource.TestCheckFunc {
|
|||
}
|
||||
}
|
||||
|
||||
const testAccDataSourceAwsVpcConfig = `
|
||||
func testAccDataSourceAwsVpcConfigIpv6(cidr, tag string) string {
|
||||
return fmt.Sprintf(`
|
||||
provider "aws" {
|
||||
region = "us-west-2"
|
||||
}
|
||||
|
||||
resource "aws_vpc" "test" {
|
||||
cidr_block = "172.16.0.0/16"
|
||||
cidr_block = "%s"
|
||||
assign_generated_ipv6_cidr_block = true
|
||||
|
||||
tags {
|
||||
Name = "terraform-testacc-vpc-data-source"
|
||||
Name = "%s"
|
||||
}
|
||||
}
|
||||
|
||||
data "aws_vpc" "by_id" {
|
||||
id = "${aws_vpc.test.id}"
|
||||
}`, cidr, tag)
|
||||
}
|
||||
|
||||
func testAccDataSourceAwsVpcConfig(cidr, tag string) string {
|
||||
return fmt.Sprintf(`
|
||||
provider "aws" {
|
||||
region = "us-west-2"
|
||||
}
|
||||
|
||||
resource "aws_vpc" "test" {
|
||||
cidr_block = "%s"
|
||||
|
||||
tags {
|
||||
Name = "%s"
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -91,5 +141,5 @@ data "aws_vpc" "by_filter" {
|
|||
name = "cidr"
|
||||
values = ["${aws_vpc.test.cidr_block}"]
|
||||
}
|
||||
}`, cidr, tag)
|
||||
}
|
||||
`
|
||||
|
|
|
@@ -1,6 +1,8 @@
package aws

import (
	"bytes"
	"encoding/json"
	"log"
	"strings"

@@ -42,3 +44,17 @@ func suppressAwsDbEngineVersionDiffs(k, old, new string, d *schema.ResourceData)
	// Throw a diff by default
	return false
}

func suppressEquivalentJsonDiffs(k, old, new string, d *schema.ResourceData) bool {
	ob := bytes.NewBufferString("")
	if err := json.Compact(ob, []byte(old)); err != nil {
		return false
	}

	nb := bytes.NewBufferString("")
	if err := json.Compact(nb, []byte(new)); err != nil {
		return false
	}

	return jsonBytesEqual(ob.Bytes(), nb.Bytes())
}
@@ -0,0 +1,31 @@
package aws

import (
	"testing"

	"github.com/hashicorp/terraform/helper/schema"
)

func TestSuppressEquivalentJsonDiffsWhitespaceAndNoWhitespace(t *testing.T) {
	d := new(schema.ResourceData)

	noWhitespace := `{"test":"test"}`
	whitespace := `
{
"test": "test"
}`

	if !suppressEquivalentJsonDiffs("", noWhitespace, whitespace, d) {
		t.Errorf("Expected suppressEquivalentJsonDiffs to return true for %s == %s", noWhitespace, whitespace)
	}

	noWhitespaceDiff := `{"test":"test"}`
	whitespaceDiff := `
{
"test": "tested"
}`

	if suppressEquivalentJsonDiffs("", noWhitespaceDiff, whitespaceDiff, d) {
		t.Errorf("Expected suppressEquivalentJsonDiffs to return false for %s == %s", noWhitespaceDiff, whitespaceDiff)
	}
}
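suppressEquivalentJsonDiffs is written in the diff-suppression shape used by helper/schema, so a resource can attach it to a JSON-valued string attribute and whitespace-only changes stop producing plan diffs. A hedged sketch of that wiring, assuming helper/schema's DiffSuppressFunc field (the attribute name "policy" and the helper function below are illustrative, not taken from this commit):

```go
package aws

import "github.com/hashicorp/terraform/helper/schema"

// exampleJsonAttribute is illustrative only: it shows where a function with
// the suppressEquivalentJsonDiffs signature would be plugged in, e.g. for an
// IAM-style "policy" attribute.
func exampleJsonAttribute() *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeString,
		Optional: true,
		// Old and new values that json.Compact to the same bytes are treated
		// as equal, so reformatting the JSON no longer shows up in a plan.
		DiffSuppressFunc: suppressEquivalentJsonDiffs,
	}
}
```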
@ -0,0 +1,31 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccAWSIAMAccountAlias_importBasic(t *testing.T) {
|
||||
resourceName := "aws_iam_account_alias.test"
|
||||
|
||||
rstring := acctest.RandString(5)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSIAMAccountAliasDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccAWSIAMAccountAliasConfig(rstring),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
|
@@ -51,6 +51,7 @@ func resourceAwsRouteTableImportState(
		d.SetType("aws_route")
		d.Set("route_table_id", id)
		d.Set("destination_cidr_block", route.DestinationCidrBlock)
		d.Set("destination_ipv6_cidr_block", route.DestinationIpv6CidrBlock)
		d.SetId(routeIDHash(d, route))
		results = append(results, d)
	}
@ -23,11 +23,11 @@ func TestAccAWSRouteTable_importBasic(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckRouteTableDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccRouteTableConfig,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
ResourceName: "aws_route_table.foo",
|
||||
ImportState: true,
|
||||
ImportStateCheck: checkFn,
|
||||
|
@ -51,11 +51,11 @@ func TestAccAWSRouteTable_complex(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckRouteTableDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccRouteTableConfig_complexImport,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
ResourceName: "aws_route_table.mod",
|
||||
ImportState: true,
|
||||
ImportStateCheck: checkFn,
|
||||
|
|
|
@ -66,13 +66,20 @@ func resourceAwsSecurityGroupImportStatePerm(sg *ec2.SecurityGroup, ruleType str
|
|||
p := &ec2.IpPermission{
|
||||
FromPort: perm.FromPort,
|
||||
IpProtocol: perm.IpProtocol,
|
||||
IpRanges: perm.IpRanges,
|
||||
PrefixListIds: perm.PrefixListIds,
|
||||
ToPort: perm.ToPort,
|
||||
|
||||
UserIdGroupPairs: []*ec2.UserIdGroupPair{pair},
|
||||
}
|
||||
|
||||
if perm.Ipv6Ranges != nil {
|
||||
p.Ipv6Ranges = perm.Ipv6Ranges
|
||||
}
|
||||
|
||||
if perm.IpRanges != nil {
|
||||
p.IpRanges = perm.IpRanges
|
||||
}
|
||||
|
||||
r, err := resourceAwsSecurityGroupImportStatePermPair(sg, ruleType, p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -23,11 +23,39 @@ func TestAccAWSSecurityGroup_importBasic(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfig,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
ResourceName: "aws_security_group.web",
|
||||
ImportState: true,
|
||||
ImportStateCheck: checkFn,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSSecurityGroup_importIpv6(t *testing.T) {
|
||||
checkFn := func(s []*terraform.InstanceState) error {
|
||||
// Expect 3: group, 2 rules
|
||||
if len(s) != 3 {
|
||||
return fmt.Errorf("expected 3 states: %#v", s)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfigIpv6,
|
||||
},
|
||||
|
||||
{
|
||||
ResourceName: "aws_security_group.web",
|
||||
ImportState: true,
|
||||
ImportStateCheck: checkFn,
|
||||
|
@ -42,11 +70,11 @@ func TestAccAWSSecurityGroup_importSelf(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfig_importSelf,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
ResourceName: "aws_security_group.allow_all",
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
|
@ -61,11 +89,11 @@ func TestAccAWSSecurityGroup_importSourceSecurityGroup(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfig_importSourceSecurityGroup,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
ResourceName: "aws_security_group.test_group_1",
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
|
|
|
@@ -32,7 +32,14 @@ func expandNetworkAclEntries(configured []interface{}, entryType string) ([]*ec2
 			Egress:     aws.Bool(entryType == "egress"),
 			RuleAction: aws.String(data["action"].(string)),
 			RuleNumber: aws.Int64(int64(data["rule_no"].(int))),
-			CidrBlock:  aws.String(data["cidr_block"].(string)),
 		}
 
+		if v, ok := data["ipv6_cidr_block"]; ok {
+			e.Ipv6CidrBlock = aws.String(v.(string))
+		}
+
+		if v, ok := data["cidr_block"]; ok {
+			e.CidrBlock = aws.String(v.(string))
+		}
+
 		// Specify additional required fields for ICMP
@@ -55,14 +62,24 @@ func flattenNetworkAclEntries(list []*ec2.NetworkAclEntry) []map[string]interfac
 	entries := make([]map[string]interface{}, 0, len(list))
 
 	for _, entry := range list {
-		entries = append(entries, map[string]interface{}{
-			"from_port":  *entry.PortRange.From,
-			"to_port":    *entry.PortRange.To,
-			"action":     *entry.RuleAction,
-			"rule_no":    *entry.RuleNumber,
-			"protocol":   *entry.Protocol,
-			"cidr_block": *entry.CidrBlock,
-		})
+		newEntry := map[string]interface{}{
+			"from_port": *entry.PortRange.From,
+			"to_port":   *entry.PortRange.To,
+			"action":    *entry.RuleAction,
+			"rule_no":   *entry.RuleNumber,
+			"protocol":  *entry.Protocol,
+		}
+
+		if entry.CidrBlock != nil {
+			newEntry["cidr_block"] = *entry.CidrBlock
+		}
+
+		if entry.Ipv6CidrBlock != nil {
+			newEntry["ipv6_cidr_block"] = *entry.Ipv6CidrBlock
+		}
+
+		entries = append(entries, newEntry)
 	}
 
 	return entries
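Note: the flatten change above only emits a `cidr_block` or `ipv6_cidr_block` key when the corresponding pointer is set on the API response, which keeps IPv6-only entries from dereferencing a nil `CidrBlock`. A minimal standalone sketch of the same guard pattern, with an invented entry for illustration:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// flattenEntry mirrors the nil-guard approach above: a key is only added
// when the corresponding pointer is non-nil.
func flattenEntry(entry *ec2.NetworkAclEntry) map[string]interface{} {
	m := map[string]interface{}{
		"rule_no":  *entry.RuleNumber,
		"action":   *entry.RuleAction,
		"protocol": *entry.Protocol,
	}
	if entry.CidrBlock != nil {
		m["cidr_block"] = *entry.CidrBlock
	}
	if entry.Ipv6CidrBlock != nil {
		m["ipv6_cidr_block"] = *entry.Ipv6CidrBlock
	}
	return m
}

func main() {
	// An IPv6-only entry: CidrBlock stays nil, so no "cidr_block" key is emitted.
	entry := &ec2.NetworkAclEntry{
		RuleNumber:    aws.Int64(100),
		RuleAction:    aws.String("allow"),
		Protocol:      aws.String("6"),
		Ipv6CidrBlock: aws.String("::/0"),
	}
	fmt.Println(flattenEntry(entry))
}
```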
@ -298,6 +298,7 @@ func Provider() terraform.ResourceProvider {
|
|||
"aws_flow_log": resourceAwsFlowLog(),
|
||||
"aws_glacier_vault": resourceAwsGlacierVault(),
|
||||
"aws_iam_access_key": resourceAwsIamAccessKey(),
|
||||
"aws_iam_account_alias": resourceAwsIamAccountAlias(),
|
||||
"aws_iam_account_password_policy": resourceAwsIamAccountPasswordPolicy(),
|
||||
"aws_iam_group_policy": resourceAwsIamGroupPolicy(),
|
||||
"aws_iam_group": resourceAwsIamGroup(),
|
||||
@ -21,27 +21,34 @@ func resourceAwsApiGatewayDomainName() *schema.Resource {
|
|||
|
||||
Schema: map[string]*schema.Schema{
|
||||
|
||||
//According to AWS Documentation, ACM will be the only way to add certificates
|
||||
//to ApiGateway DomainNames. When this happens, we will be deprecating all certificate methods
|
||||
//except certificate_arn. We are not quite sure when this will happen.
|
||||
"certificate_body": {
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Required: true,
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Optional: true,
|
||||
ConflictsWith: []string{"certificate_arn"},
|
||||
},
|
||||
|
||||
"certificate_chain": {
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Required: true,
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Optional: true,
|
||||
ConflictsWith: []string{"certificate_arn"},
|
||||
},
|
||||
|
||||
"certificate_name": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ConflictsWith: []string{"certificate_arn"},
|
||||
},
|
||||
|
||||
"certificate_private_key": {
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Required: true,
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Optional: true,
|
||||
ConflictsWith: []string{"certificate_arn"},
|
||||
},
|
||||
|
||||
"domain_name": {
|
||||
|
@ -50,6 +57,12 @@ func resourceAwsApiGatewayDomainName() *schema.Resource {
|
|||
ForceNew: true,
|
||||
},
|
||||
|
||||
"certificate_arn": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ConflictsWith: []string{"certificate_body", "certificate_chain", "certificate_name", "certificate_private_key"},
|
||||
},
|
||||
|
||||
"cloudfront_domain_name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
|
@ -72,13 +85,31 @@ func resourceAwsApiGatewayDomainNameCreate(d *schema.ResourceData, meta interfac
|
|||
conn := meta.(*AWSClient).apigateway
|
||||
log.Printf("[DEBUG] Creating API Gateway Domain Name")
|
||||
|
||||
domainName, err := conn.CreateDomainName(&apigateway.CreateDomainNameInput{
|
||||
CertificateBody: aws.String(d.Get("certificate_body").(string)),
|
||||
CertificateChain: aws.String(d.Get("certificate_chain").(string)),
|
||||
CertificateName: aws.String(d.Get("certificate_name").(string)),
|
||||
CertificatePrivateKey: aws.String(d.Get("certificate_private_key").(string)),
|
||||
DomainName: aws.String(d.Get("domain_name").(string)),
|
||||
})
|
||||
params := &apigateway.CreateDomainNameInput{
|
||||
DomainName: aws.String(d.Get("domain_name").(string)),
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("certificate_arn"); ok {
|
||||
params.CertificateArn = aws.String(v.(string))
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("certificate_name"); ok {
|
||||
params.CertificateName = aws.String(v.(string))
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("certificate_body"); ok {
|
||||
params.CertificateBody = aws.String(v.(string))
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("certificate_chain"); ok {
|
||||
params.CertificateChain = aws.String(v.(string))
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("certificate_private_key"); ok {
|
||||
params.CertificatePrivateKey = aws.String(v.(string))
|
||||
}
|
||||
|
||||
domainName, err := conn.CreateDomainName(params)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating API Gateway Domain Name: %s", err)
|
||||
}
|
||||
|
@ -113,6 +144,7 @@ func resourceAwsApiGatewayDomainNameRead(d *schema.ResourceData, meta interface{
|
|||
}
|
||||
d.Set("cloudfront_domain_name", domainName.DistributionDomainName)
|
||||
d.Set("domain_name", domainName.DomainName)
|
||||
d.Set("certificate_arn", domainName.CertificateArn)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -128,6 +160,14 @@ func resourceAwsApiGatewayDomainNameUpdateOperations(d *schema.ResourceData) []*
|
|||
})
|
||||
}
|
||||
|
||||
if d.HasChange("certificate_arn") {
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("replace"),
|
||||
Path: aws.String("/certificateArn"),
|
||||
Value: aws.String(d.Get("certificate_arn").(string)),
|
||||
})
|
||||
}
|
||||
|
||||
return operations
|
||||
}
|
||||
|
||||
|
@ -139,6 +179,7 @@ func resourceAwsApiGatewayDomainNameUpdate(d *schema.ResourceData, meta interfac
|
|||
DomainName: aws.String(d.Id()),
|
||||
PatchOperations: resourceAwsApiGatewayDomainNameUpdateOperations(d),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
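Note: the domain-name changes above move from four required certificate fields to an either/or model: either `certificate_arn` (ACM) or the body/chain/name/private-key set, enforced with `ConflictsWith`, with the create call only sending fields that were actually configured. A rough standalone sketch of that conditional-input pattern, using a plain map in place of `*schema.ResourceData`; the `cfg` values are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/apigateway"
)

func main() {
	// Hypothetical user configuration; only certificate_arn is set here.
	cfg := map[string]string{
		"domain_name":     "api.example.com",
		"certificate_arn": "arn:aws:acm:us-east-1:123456789012:certificate/example",
	}

	params := &apigateway.CreateDomainNameInput{
		DomainName: aws.String(cfg["domain_name"]),
	}

	// Mirror of the d.GetOk checks above: only populate what was configured.
	if v, ok := cfg["certificate_arn"]; ok && v != "" {
		params.CertificateArn = aws.String(v)
	}
	if v, ok := cfg["certificate_body"]; ok && v != "" {
		params.CertificateBody = aws.String(v)
	}

	fmt.Println(params)
}
```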
@ -472,13 +472,15 @@ func TestAccAWSAutoScalingGroup_ALB_TargetGroups_ELBCapacity(t *testing.T) {
|
|||
var group autoscaling.Group
|
||||
var tg elbv2.TargetGroup
|
||||
|
||||
rInt := acctest.RandInt()
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity,
|
||||
Config: testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity(rInt),
|
||||
Check: resource.ComposeAggregateTestCheckFunc(
|
||||
testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
|
||||
testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &tg),
|
||||
|
@ -1386,7 +1388,8 @@ resource "aws_autoscaling_group" "bar" {
|
|||
`, name)
|
||||
}
|
||||
|
||||
const testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity = `
|
||||
func testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
provider "aws" {
|
||||
region = "us-west-2"
|
||||
}
|
||||
|
@ -1420,7 +1423,7 @@ resource "aws_alb_listener" "test_listener" {
|
|||
}
|
||||
|
||||
resource "aws_alb_target_group" "test" {
|
||||
name = "tf-example-alb-tg"
|
||||
name = "tf-alb-test-%d"
|
||||
port = 80
|
||||
protocol = "HTTP"
|
||||
vpc_id = "${aws_vpc.default.id}"
|
||||
|
@ -1431,6 +1434,10 @@ resource "aws_alb_target_group" "test" {
|
|||
timeout = "2"
|
||||
interval = "5"
|
||||
}
|
||||
|
||||
tags {
|
||||
Name = "testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_subnet" "main" {
|
||||
|
@ -1522,8 +1529,8 @@ resource "aws_autoscaling_group" "bar" {
|
|||
force_delete = true
|
||||
termination_policies = ["OldestInstance"]
|
||||
launch_configuration = "${aws_launch_configuration.foobar.name}"
|
||||
}`, rInt)
|
||||
}
|
||||
`
|
||||
|
||||
func testAccAWSAutoScalingGroupConfigWithSuspendedProcesses(name string) string {
|
||||
return fmt.Sprintf(`
|
||||
|
|
|
@ -164,6 +164,13 @@ func resourceAwsCodeBuildProject() *schema.Resource {
|
|||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
ValidateFunc: validateAwsCodeBuildTimeout,
|
||||
Removed: "This field has been removed. Please use build_timeout instead",
|
||||
},
|
||||
"build_timeout": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: "60",
|
||||
ValidateFunc: validateAwsCodeBuildTimeout,
|
||||
},
|
||||
"tags": tagsSchema(),
|
||||
},
|
||||
|
@ -196,7 +203,7 @@ func resourceAwsCodeBuildProjectCreate(d *schema.ResourceData, meta interface{})
|
|||
params.ServiceRole = aws.String(v.(string))
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("timeout"); ok {
|
||||
if v, ok := d.GetOk("build_timeout"); ok {
|
||||
params.TimeoutInMinutes = aws.Int64(int64(v.(int)))
|
||||
}
|
||||
|
||||
|
@ -373,7 +380,7 @@ func resourceAwsCodeBuildProjectRead(d *schema.ResourceData, meta interface{}) e
|
|||
d.Set("encryption_key", project.EncryptionKey)
|
||||
d.Set("name", project.Name)
|
||||
d.Set("service_role", project.ServiceRole)
|
||||
d.Set("timeout", project.TimeoutInMinutes)
|
||||
d.Set("build_timeout", project.TimeoutInMinutes)
|
||||
|
||||
if err := d.Set("tags", tagsToMapCodeBuild(project.Tags)); err != nil {
|
||||
return err
|
||||
|
@ -416,8 +423,8 @@ func resourceAwsCodeBuildProjectUpdate(d *schema.ResourceData, meta interface{})
|
|||
params.ServiceRole = aws.String(d.Get("service_role").(string))
|
||||
}
|
||||
|
||||
if d.HasChange("timeout") {
|
||||
params.TimeoutInMinutes = aws.Int64(int64(d.Get("timeout").(int)))
|
||||
if d.HasChange("build_timeout") {
|
||||
params.TimeoutInMinutes = aws.Int64(int64(d.Get("build_timeout").(int)))
|
||||
}
|
||||
|
||||
// The documentation clearly says "The replacement set of tags for this build project."
|
||||
|
|
|
@@ -0,0 +1,36 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"strings"
+
+	"github.com/hashicorp/terraform/terraform"
+)
+
+func resourceAwsCodebuildMigrateState(
+	v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
+	switch v {
+	case 0:
+		log.Println("[INFO] Found AWS Codebuild State v0; migrating to v1")
+		return migrateCodebuildStateV0toV1(is)
+	default:
+		return is, fmt.Errorf("Unexpected schema version: %d", v)
+	}
+}
+
+func migrateCodebuildStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
+	if is.Empty() {
+		log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
+		return is, nil
+	}
+
+	log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes)
+
+	if is.Attributes["timeout"] != "" {
+		is.Attributes["build_timeout"] = strings.TrimSpace(is.Attributes["timeout"])
+	}
+
+	log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
+	return is, nil
+}
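Note: this new migration copies the legacy `timeout` attribute into `build_timeout` when state written under the v0 schema is loaded. A sketch of how such a migration is typically wired into the resource definition, assuming the usual `SchemaVersion`/`MigrateState` fields on `schema.Resource` and the migrate function from the file above; the registration hunk itself is not shown in this diff:

```go
package aws

import "github.com/hashicorp/terraform/helper/schema"

// Presumed wiring (assumption, not shown in this hunk): helper/schema runs
// MigrateState whenever stored state carries a version lower than SchemaVersion.
func resourceAwsCodeBuildProjectSketch() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,
		MigrateState:  resourceAwsCodebuildMigrateState,

		Schema: map[string]*schema.Schema{
			"build_timeout": {
				Type:     schema.TypeInt,
				Optional: true,
				Default:  60,
			},
		},
	}
}
```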
@ -0,0 +1,53 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAWSCodebuildMigrateState(t *testing.T) {
|
||||
cases := map[string]struct {
|
||||
StateVersion int
|
||||
ID string
|
||||
Attributes map[string]string
|
||||
Expected string
|
||||
Meta interface{}
|
||||
}{
|
||||
"v0_1": {
|
||||
StateVersion: 0,
|
||||
ID: "tf-testing-file",
|
||||
Attributes: map[string]string{
|
||||
"description": "some description",
|
||||
"timeout": "5",
|
||||
},
|
||||
Expected: "5",
|
||||
},
|
||||
"v0_2": {
|
||||
StateVersion: 0,
|
||||
ID: "tf-testing-file",
|
||||
Attributes: map[string]string{
|
||||
"description": "some description",
|
||||
"build_timeout": "5",
|
||||
},
|
||||
Expected: "5",
|
||||
},
|
||||
}
|
||||
|
||||
for tn, tc := range cases {
|
||||
is := &terraform.InstanceState{
|
||||
ID: tc.ID,
|
||||
Attributes: tc.Attributes,
|
||||
}
|
||||
is, err := resourceAwsCodebuildMigrateState(
|
||||
tc.StateVersion, is, tc.Meta)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("bad: %s, err: %#v", tn, err)
|
||||
}
|
||||
|
||||
if is.Attributes["build_timeout"] != tc.Expected {
|
||||
t.Fatalf("Bad build_timeout migration: %s\n\n expected: %s", is.Attributes["build_timeout"], tc.Expected)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -25,19 +25,51 @@ func TestAccAWSCodeBuildProject_basic(t *testing.T) {
|
|||
Config: testAccAWSCodeBuildProjectConfig_basic(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSCodeBuildProjectExists("aws_codebuild_project.foo"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_codebuild_project.foo", "build_timeout", "5"),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSCodeBuildProjectConfig_basicUpdated(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSCodeBuildProjectExists("aws_codebuild_project.foo"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_codebuild_project.foo", "build_timeout", "50"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSCodeBuildProject_artifactsTypeValidation(t *testing.T) {
|
||||
func TestAccAWSCodeBuildProject_default_build_timeout(t *testing.T) {
|
||||
name := acctest.RandString(10)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSCodeBuildProjectDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSCodeBuildProjectConfig_default_timeout(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSCodeBuildProjectExists("aws_codebuild_project.foo"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_codebuild_project.foo", "build_timeout", "60"),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSCodeBuildProjectConfig_basicUpdated(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSCodeBuildProjectExists("aws_codebuild_project.foo"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_codebuild_project.foo", "build_timeout", "50"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAWSCodeBuildProject_artifactsTypeValidation(t *testing.T) {
|
||||
cases := []struct {
|
||||
Value string
|
||||
ErrCount int
|
||||
|
@ -57,7 +89,7 @@ func TestAccAWSCodeBuildProject_artifactsTypeValidation(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestAccAWSCodeBuildProject_artifactsNamespaceTypeValidation(t *testing.T) {
|
||||
func TestAWSCodeBuildProject_artifactsNamespaceTypeValidation(t *testing.T) {
|
||||
cases := []struct {
|
||||
Value string
|
||||
ErrCount int
|
||||
|
@ -94,7 +126,7 @@ func longTestData() string {
|
|||
}, data)
|
||||
}
|
||||
|
||||
func TestAccAWSCodeBuildProject_nameValidation(t *testing.T) {
|
||||
func TestAWSCodeBuildProject_nameValidation(t *testing.T) {
|
||||
cases := []struct {
|
||||
Value string
|
||||
ErrCount int
|
||||
|
@ -115,7 +147,7 @@ func TestAccAWSCodeBuildProject_nameValidation(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestAccAWSCodeBuildProject_descriptionValidation(t *testing.T) {
|
||||
func TestAWSCodeBuildProject_descriptionValidation(t *testing.T) {
|
||||
cases := []struct {
|
||||
Value string
|
||||
ErrCount int
|
||||
|
@ -133,7 +165,7 @@ func TestAccAWSCodeBuildProject_descriptionValidation(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestAccAWSCodeBuildProject_environmentComputeTypeValidation(t *testing.T) {
|
||||
func TestAWSCodeBuildProject_environmentComputeTypeValidation(t *testing.T) {
|
||||
cases := []struct {
|
||||
Value string
|
||||
ErrCount int
|
||||
|
@ -153,7 +185,7 @@ func TestAccAWSCodeBuildProject_environmentComputeTypeValidation(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestAccAWSCodeBuildProject_environmentTypeValidation(t *testing.T) {
|
||||
func TestAWSCodeBuildProject_environmentTypeValidation(t *testing.T) {
|
||||
cases := []struct {
|
||||
Value string
|
||||
ErrCount int
|
||||
|
@ -171,7 +203,7 @@ func TestAccAWSCodeBuildProject_environmentTypeValidation(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestAccAWSCodeBuildProject_sourceTypeValidation(t *testing.T) {
|
||||
func TestAWSCodeBuildProject_sourceTypeValidation(t *testing.T) {
|
||||
cases := []struct {
|
||||
Value string
|
||||
ErrCount int
|
||||
|
@ -192,7 +224,7 @@ func TestAccAWSCodeBuildProject_sourceTypeValidation(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestAccAWSCodeBuildProject_sourceAuthTypeValidation(t *testing.T) {
|
||||
func TestAWSCodeBuildProject_sourceAuthTypeValidation(t *testing.T) {
|
||||
cases := []struct {
|
||||
Value string
|
||||
ErrCount int
|
||||
|
@ -210,7 +242,7 @@ func TestAccAWSCodeBuildProject_sourceAuthTypeValidation(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestAccAWSCodeBuildProject_timeoutValidation(t *testing.T) {
|
||||
func TestAWSCodeBuildProject_timeoutValidation(t *testing.T) {
|
||||
cases := []struct {
|
||||
Value int
|
||||
ErrCount int
|
||||
|
@ -342,7 +374,7 @@ resource "aws_iam_policy_attachment" "codebuild_policy_attachment" {
|
|||
resource "aws_codebuild_project" "foo" {
|
||||
name = "test-project-%s"
|
||||
description = "test_codebuild_project"
|
||||
timeout = "5"
|
||||
build_timeout = "5"
|
||||
service_role = "${aws_iam_role.codebuild_role.arn}"
|
||||
|
||||
artifacts {
|
||||
|
@ -429,7 +461,94 @@ resource "aws_iam_policy_attachment" "codebuild_policy_attachment" {
|
|||
resource "aws_codebuild_project" "foo" {
|
||||
name = "test-project-%s"
|
||||
description = "test_codebuild_project"
|
||||
timeout = "5"
|
||||
build_timeout = "50"
|
||||
service_role = "${aws_iam_role.codebuild_role.arn}"
|
||||
|
||||
artifacts {
|
||||
type = "NO_ARTIFACTS"
|
||||
}
|
||||
|
||||
environment {
|
||||
compute_type = "BUILD_GENERAL1_SMALL"
|
||||
image = "2"
|
||||
type = "LINUX_CONTAINER"
|
||||
|
||||
environment_variable = {
|
||||
"name" = "SOME_OTHERKEY"
|
||||
"value" = "SOME_OTHERVALUE"
|
||||
}
|
||||
}
|
||||
|
||||
source {
|
||||
auth {
|
||||
type = "OAUTH"
|
||||
}
|
||||
|
||||
type = "GITHUB"
|
||||
location = "https://github.com/mitchellh/packer.git"
|
||||
}
|
||||
|
||||
tags {
|
||||
"Environment" = "Test"
|
||||
}
|
||||
}
|
||||
`, rName, rName, rName, rName)
|
||||
}
|
||||
|
||||
func testAccAWSCodeBuildProjectConfig_default_timeout(rName string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_iam_role" "codebuild_role" {
|
||||
name = "codebuild-role-%s"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Principal": {
|
||||
"Service": "codebuild.amazonaws.com"
|
||||
},
|
||||
"Action": "sts:AssumeRole"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_policy" "codebuild_policy" {
|
||||
name = "codebuild-policy-%s"
|
||||
path = "/service-role/"
|
||||
description = "Policy used in trust relationship with CodeBuild"
|
||||
policy = <<POLICY
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Resource": [
|
||||
"*"
|
||||
],
|
||||
"Action": [
|
||||
"logs:CreateLogGroup",
|
||||
"logs:CreateLogStream",
|
||||
"logs:PutLogEvents"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
POLICY
|
||||
}
|
||||
|
||||
resource "aws_iam_policy_attachment" "codebuild_policy_attachment" {
|
||||
name = "codebuild-policy-attachment-%s"
|
||||
policy_arn = "${aws_iam_policy.codebuild_policy.arn}"
|
||||
roles = ["${aws_iam_role.codebuild_role.id}"]
|
||||
}
|
||||
|
||||
resource "aws_codebuild_project" "foo" {
|
||||
name = "test-project-%s"
|
||||
description = "test_codebuild_project"
|
||||
|
||||
service_role = "${aws_iam_role.codebuild_role.arn}"
|
||||
|
||||
artifacts {
|
||||
|
|
|
@ -25,19 +25,19 @@ func resourceAwsCustomerGateway() *schema.Resource {
|
|||
},
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"bgp_asn": &schema.Schema{
|
||||
"bgp_asn": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"ip_address": &schema.Schema{
|
||||
"ip_address": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"type": &schema.Schema{
|
||||
"type": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
|
@ -51,10 +51,23 @@ func resourceAwsCustomerGateway() *schema.Resource {
|
|||
func resourceAwsCustomerGatewayCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AWSClient).ec2conn
|
||||
|
||||
ipAddress := d.Get("ip_address").(string)
|
||||
vpnType := d.Get("type").(string)
|
||||
bgpAsn := d.Get("bgp_asn").(int)
|
||||
|
||||
alreadyExists, err := resourceAwsCustomerGatewayExists(vpnType, ipAddress, bgpAsn, conn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if alreadyExists {
|
||||
return fmt.Errorf("An existing customer gateway for IpAddress: %s, VpnType: %s, BGP ASN: %d has been found", ipAddress, vpnType, bgpAsn)
|
||||
}
|
||||
|
||||
createOpts := &ec2.CreateCustomerGatewayInput{
|
||||
BgpAsn: aws.Int64(int64(d.Get("bgp_asn").(int))),
|
||||
PublicIp: aws.String(d.Get("ip_address").(string)),
|
||||
Type: aws.String(d.Get("type").(string)),
|
||||
BgpAsn: aws.Int64(int64(bgpAsn)),
|
||||
PublicIp: aws.String(ipAddress),
|
||||
Type: aws.String(vpnType),
|
||||
}
|
||||
|
||||
// Create the Customer Gateway.
|
||||
|
@@ -123,6 +136,37 @@ func customerGatewayRefreshFunc(conn *ec2.EC2, gatewayId string) resource.StateR
 	}
 }
 
+func resourceAwsCustomerGatewayExists(vpnType, ipAddress string, bgpAsn int, conn *ec2.EC2) (bool, error) {
+	ipAddressFilter := &ec2.Filter{
+		Name:   aws.String("ip-address"),
+		Values: []*string{aws.String(ipAddress)},
+	}
+
+	typeFilter := &ec2.Filter{
+		Name:   aws.String("type"),
+		Values: []*string{aws.String(vpnType)},
+	}
+
+	bgp := strconv.Itoa(bgpAsn)
+	bgpAsnFilter := &ec2.Filter{
+		Name:   aws.String("bgp-asn"),
+		Values: []*string{aws.String(bgp)},
+	}
+
+	resp, err := conn.DescribeCustomerGateways(&ec2.DescribeCustomerGatewaysInput{
+		Filters: []*ec2.Filter{ipAddressFilter, typeFilter, bgpAsnFilter},
+	})
+	if err != nil {
+		return false, err
+	}
+
+	if len(resp.CustomerGateways) > 0 && *resp.CustomerGateways[0].State != "deleted" {
+		return true, nil
+	}
+
+	return false, nil
+}
+
 func resourceAwsCustomerGatewayRead(d *schema.ResourceData, meta interface{}) error {
 	conn := meta.(*AWSClient).ec2conn
 
@ -2,6 +2,7 @@ package aws
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
@ -21,19 +22,19 @@ func TestAccAWSCustomerGateway_basic(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckCustomerGatewayDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccCustomerGatewayConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccCustomerGatewayConfigUpdateTags,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccCustomerGatewayConfigForceReplace,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway),
|
||||
|
@ -43,6 +44,28 @@ func TestAccAWSCustomerGateway_basic(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestAccAWSCustomerGateway_similarAlreadyExists(t *testing.T) {
|
||||
var gateway ec2.CustomerGateway
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
IDRefreshName: "aws_customer_gateway.foo",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckCustomerGatewayDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccCustomerGatewayConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccCustomerGatewayConfigIdentical,
|
||||
ExpectError: regexp.MustCompile("An existing customer gateway"),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSCustomerGateway_disappears(t *testing.T) {
|
||||
var gateway ec2.CustomerGateway
|
||||
resource.Test(t, resource.TestCase{
|
||||
|
@ -50,7 +73,7 @@ func TestAccAWSCustomerGateway_disappears(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckCustomerGatewayDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccCustomerGatewayConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway),
|
||||
|
@ -178,6 +201,26 @@ resource "aws_customer_gateway" "foo" {
|
|||
}
|
||||
`
|
||||
|
||||
const testAccCustomerGatewayConfigIdentical = `
|
||||
resource "aws_customer_gateway" "foo" {
|
||||
bgp_asn = 65000
|
||||
ip_address = "172.0.0.1"
|
||||
type = "ipsec.1"
|
||||
tags {
|
||||
Name = "foo-gateway"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_customer_gateway" "identical" {
|
||||
bgp_asn = 65000
|
||||
ip_address = "172.0.0.1"
|
||||
type = "ipsec.1"
|
||||
tags {
|
||||
Name = "foo-gateway-identical"
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
// Add the Another: "tag" tag.
|
||||
const testAccCustomerGatewayConfigUpdateTags = `
|
||||
resource "aws_customer_gateway" "foo" {
|
||||
|
|
|
@ -839,6 +839,10 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error
|
|||
}
|
||||
d.SetPartial("apply_immediately")
|
||||
|
||||
if !d.Get("apply_immediately").(bool) {
|
||||
log.Println("[INFO] Only settings updating, instance changes will be applied in next maintenance window")
|
||||
}
|
||||
|
||||
requestUpdate := false
|
||||
if d.HasChange("allocated_storage") || d.HasChange("iops") {
|
||||
d.SetPartial("allocated_storage")
|
||||
|
|
|
@ -622,6 +622,10 @@ resource "aws_db_instance" "bar" {
|
|||
backup_retention_period = 0
|
||||
|
||||
parameter_group_name = "default.mysql5.6"
|
||||
|
||||
timeouts {
|
||||
create = "30m"
|
||||
}
|
||||
}`
|
||||
|
||||
var testAccAWSDBInstanceConfigKmsKeyId = `
|
||||
|
|
|
@ -17,56 +17,66 @@ func resourceAwsDefaultRouteTable() *schema.Resource {
|
|||
Delete: resourceAwsDefaultRouteTableDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"default_route_table_id": &schema.Schema{
|
||||
"default_route_table_id": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"vpc_id": &schema.Schema{
|
||||
"vpc_id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"propagating_vgws": &schema.Schema{
|
||||
"propagating_vgws": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
|
||||
"route": &schema.Schema{
|
||||
"route": {
|
||||
Type: schema.TypeSet,
|
||||
Computed: true,
|
||||
Optional: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"cidr_block": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"gateway_id": &schema.Schema{
|
||||
"cidr_block": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"instance_id": &schema.Schema{
|
||||
"ipv6_cidr_block": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"nat_gateway_id": &schema.Schema{
|
||||
"egress_only_gateway_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"vpc_peering_connection_id": &schema.Schema{
|
||||
"gateway_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"network_interface_id": &schema.Schema{
|
||||
"instance_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"nat_gateway_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"vpc_peering_connection_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"network_interface_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
@ -193,16 +203,33 @@ func revokeAllRouteTableRules(defaultRouteTableId string, meta interface{}) erro
|
|||
// See aws_vpc_endpoint
|
||||
continue
|
||||
}
|
||||
log.Printf(
|
||||
"[INFO] Deleting route from %s: %s",
|
||||
defaultRouteTableId, *r.DestinationCidrBlock)
|
||||
_, err := conn.DeleteRoute(&ec2.DeleteRouteInput{
|
||||
RouteTableId: aws.String(defaultRouteTableId),
|
||||
DestinationCidrBlock: r.DestinationCidrBlock,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
if r.DestinationCidrBlock != nil {
|
||||
log.Printf(
|
||||
"[INFO] Deleting route from %s: %s",
|
||||
defaultRouteTableId, *r.DestinationCidrBlock)
|
||||
_, err := conn.DeleteRoute(&ec2.DeleteRouteInput{
|
||||
RouteTableId: aws.String(defaultRouteTableId),
|
||||
DestinationCidrBlock: r.DestinationCidrBlock,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if r.DestinationIpv6CidrBlock != nil {
|
||||
log.Printf(
|
||||
"[INFO] Deleting route from %s: %s",
|
||||
defaultRouteTableId, *r.DestinationIpv6CidrBlock)
|
||||
_, err := conn.DeleteRoute(&ec2.DeleteRouteInput{
|
||||
RouteTableId: aws.String(defaultRouteTableId),
|
||||
DestinationIpv6CidrBlock: r.DestinationIpv6CidrBlock,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
|
@ -20,7 +20,7 @@ func TestAccAWSDefaultRouteTable_basic(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckDefaultRouteTableDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccDefaultRouteTableConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckRouteTableExists(
|
||||
|
@ -40,7 +40,7 @@ func TestAccAWSDefaultRouteTable_swap(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckDefaultRouteTableDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccDefaultRouteTable_change,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckRouteTableExists(
|
||||
|
@ -53,7 +53,7 @@ func TestAccAWSDefaultRouteTable_swap(t *testing.T) {
|
|||
// behavior that may happen, in which case a follow up plan will show (in
|
||||
// this case) a diff as the table now needs to be updated to match the
|
||||
// config
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccDefaultRouteTable_change_mod,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckRouteTableExists(
|
||||
|
@ -74,7 +74,7 @@ func TestAccAWSDefaultRouteTable_vpc_endpoint(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckDefaultRouteTableDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccDefaultRouteTable_vpc_endpoint,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckRouteTableExists(
|
||||
|
|
|
@ -27,7 +27,7 @@ func resourceAwsDmsReplicationTask() *schema.Resource {
|
|||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"cdc_start_time": {
|
||||
Type: schema.TypeInt,
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
// Requires a Unix timestamp in seconds. Example 1484346880
|
||||
},
|
||||
|
@ -57,9 +57,10 @@ func resourceAwsDmsReplicationTask() *schema.Resource {
|
|||
ValidateFunc: validateDmsReplicationTaskId,
|
||||
},
|
||||
"replication_task_settings": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ValidateFunc: validateJsonString,
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ValidateFunc: validateJsonString,
|
||||
DiffSuppressFunc: suppressEquivalentJsonDiffs,
|
||||
},
|
||||
"source_endpoint_arn": {
|
||||
Type: schema.TypeString,
|
||||
|
@ -68,9 +69,10 @@ func resourceAwsDmsReplicationTask() *schema.Resource {
|
|||
ValidateFunc: validateArn,
|
||||
},
|
||||
"table_mappings": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ValidateFunc: validateJsonString,
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ValidateFunc: validateJsonString,
|
||||
DiffSuppressFunc: suppressEquivalentJsonDiffs,
|
||||
},
|
||||
"tags": {
|
||||
Type: schema.TypeMap,
|
||||
|
|
|
@ -70,11 +70,13 @@ func resourceAwsEcsTaskDefinition() *schema.Resource {
|
|||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"host_path": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
|
@ -135,6 +135,41 @@ func TestAccAWSEcsTaskDefinition_constraint(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestAccAWSEcsTaskDefinition_changeVolumesForcesNewResource(t *testing.T) {
|
||||
var before ecs.TaskDefinition
|
||||
var after ecs.TaskDefinition
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSEcsTaskDefinitionDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSEcsTaskDefinition,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.jenkins", &before),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSEcsTaskDefinitionUpdatedVolume,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.jenkins", &after),
|
||||
testAccCheckEcsTaskDefinitionRecreated(t, &before, &after),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckEcsTaskDefinitionRecreated(t *testing.T,
|
||||
before, after *ecs.TaskDefinition) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
if *before.Revision == *after.Revision {
|
||||
t.Fatalf("Expected change of TaskDefinition Revisions, but both were %v", before.Revision)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckAWSTaskDefinitionConstraintsAttrs(def *ecs.TaskDefinition) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
if len(def.PlacementConstraints) != 1 {
|
||||
|
@ -319,6 +354,55 @@ TASK_DEFINITION
|
|||
}
|
||||
`
|
||||
|
||||
var testAccAWSEcsTaskDefinitionUpdatedVolume = `
|
||||
resource "aws_ecs_task_definition" "jenkins" {
|
||||
family = "terraform-acc-test"
|
||||
container_definitions = <<TASK_DEFINITION
|
||||
[
|
||||
{
|
||||
"cpu": 10,
|
||||
"command": ["sleep", "10"],
|
||||
"entryPoint": ["/"],
|
||||
"environment": [
|
||||
{"name": "VARNAME", "value": "VARVAL"}
|
||||
],
|
||||
"essential": true,
|
||||
"image": "jenkins",
|
||||
"links": ["mongodb"],
|
||||
"memory": 128,
|
||||
"name": "jenkins",
|
||||
"portMappings": [
|
||||
{
|
||||
"containerPort": 80,
|
||||
"hostPort": 8080
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cpu": 10,
|
||||
"command": ["sleep", "10"],
|
||||
"entryPoint": ["/"],
|
||||
"essential": true,
|
||||
"image": "mongodb",
|
||||
"memory": 128,
|
||||
"name": "mongodb",
|
||||
"portMappings": [
|
||||
{
|
||||
"containerPort": 28017,
|
||||
"hostPort": 28017
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
TASK_DEFINITION
|
||||
|
||||
volume {
|
||||
name = "jenkins-home"
|
||||
host_path = "/ecs/jenkins"
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
var testAccAWSEcsTaskDefinitionWithScratchVolume = `
|
||||
resource "aws_ecs_task_definition" "sleep" {
|
||||
family = "terraform-acc-sc-volume-test"
|
||||
|
|
|
@ -108,15 +108,15 @@ resource "aws_s3_bucket_object" "default" {
|
|||
}
|
||||
|
||||
resource "aws_elastic_beanstalk_application" "default" {
|
||||
name = "tf-test-name"
|
||||
name = "tf-test-name-%d"
|
||||
description = "tf-test-desc"
|
||||
}
|
||||
|
||||
resource "aws_elastic_beanstalk_application_version" "default" {
|
||||
application = "tf-test-name"
|
||||
application = "tf-test-name-%d"
|
||||
name = "tf-test-version-label"
|
||||
bucket = "${aws_s3_bucket.default.id}"
|
||||
key = "${aws_s3_bucket_object.default.id}"
|
||||
}
|
||||
`, randInt)
|
||||
`, randInt, randInt, randInt)
|
||||
}
|
||||
|
|
|
@@ -388,7 +388,9 @@ func resourceAwsElbRead(d *schema.ResourceData, meta interface{}) error {
 		}
 	}
 	d.Set("subnets", flattenStringList(lb.Subnets))
-	d.Set("idle_timeout", lbAttrs.ConnectionSettings.IdleTimeout)
+	if lbAttrs.ConnectionSettings != nil {
+		d.Set("idle_timeout", lbAttrs.ConnectionSettings.IdleTimeout)
+	}
 	d.Set("connection_draining", lbAttrs.ConnectionDraining.Enabled)
 	d.Set("connection_draining_timeout", lbAttrs.ConnectionDraining.Timeout)
 	d.Set("cross_zone_load_balancing", lbAttrs.CrossZoneLoadBalancing.Enabled)
@ -26,7 +26,7 @@ func TestAccAWSELB_basic(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
|
@ -70,7 +70,7 @@ func TestAccAWSELB_fullCharacterRange(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: fmt.Sprintf(testAccAWSELBFullRangeOfCharacters, lbName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
||||
|
@ -93,14 +93,14 @@ func TestAccAWSELB_AccessLogs_enabled(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBAccessLogs,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
||||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBAccessLogsOn(rName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
||||
|
@ -115,7 +115,7 @@ func TestAccAWSELB_AccessLogs_enabled(t *testing.T) {
|
|||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBAccessLogs,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
||||
|
@ -138,14 +138,14 @@ func TestAccAWSELB_AccessLogs_disabled(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBAccessLogs,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
||||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBAccessLogsDisabled(rName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
||||
|
@ -160,7 +160,7 @@ func TestAccAWSELB_AccessLogs_disabled(t *testing.T) {
|
|||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBAccessLogs,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
||||
|
@ -182,7 +182,7 @@ func TestAccAWSELB_generatedName(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBGeneratedName,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
||||
|
@ -203,7 +203,7 @@ func TestAccAWSELB_availabilityZones(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
|
@ -218,7 +218,7 @@ func TestAccAWSELB_availabilityZones(t *testing.T) {
|
|||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfig_AvailabilityZonesUpdate,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
|
@ -244,7 +244,7 @@ func TestAccAWSELB_tags(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
|
@ -254,7 +254,7 @@ func TestAccAWSELB_tags(t *testing.T) {
|
|||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfig_TagUpdate,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
|
@ -285,7 +285,7 @@ func TestAccAWSELB_iam_server_cert(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccELBIAMServerCertConfig(
|
||||
fmt.Sprintf("tf-acctest-%s", acctest.RandString(10))),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
|
@ -306,7 +306,7 @@ func TestAccAWSELB_swap_subnets(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfig_subnets,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.ourapp", &conf),
|
||||
|
@ -315,7 +315,7 @@ func TestAccAWSELB_swap_subnets(t *testing.T) {
|
|||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfig_subnet_swap,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.ourapp", &conf),
|
||||
|
@ -363,7 +363,7 @@ func TestAccAWSELB_InstanceAttaching(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
|
@ -371,7 +371,7 @@ func TestAccAWSELB_InstanceAttaching(t *testing.T) {
|
|||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigNewInstance,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
|
@ -391,7 +391,7 @@ func TestAccAWSELBUpdate_Listener(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
|
@ -401,7 +401,7 @@ func TestAccAWSELBUpdate_Listener(t *testing.T) {
|
|||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigListener_update,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
|
@ -422,7 +422,7 @@ func TestAccAWSELB_HealthCheck(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigHealthCheck,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
|
@ -450,14 +450,14 @@ func TestAccAWSELBUpdate_HealthCheck(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigHealthCheck,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_elb.bar", "health_check.0.healthy_threshold", "5"),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigHealthCheck_update,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr(
|
||||
|
@ -477,7 +477,7 @@ func TestAccAWSELB_Timeout(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigIdleTimeout,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
|
@ -497,7 +497,7 @@ func TestAccAWSELBUpdate_Timeout(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigIdleTimeout,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr(
|
||||
|
@ -505,7 +505,7 @@ func TestAccAWSELBUpdate_Timeout(t *testing.T) {
|
|||
),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigIdleTimeout_update,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr(
|
||||
|
@ -524,7 +524,7 @@ func TestAccAWSELB_ConnectionDraining(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigConnectionDraining,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr(
|
||||
|
@ -546,7 +546,7 @@ func TestAccAWSELBUpdate_ConnectionDraining(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigConnectionDraining,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr(
|
||||
|
@ -557,7 +557,7 @@ func TestAccAWSELBUpdate_ConnectionDraining(t *testing.T) {
|
|||
),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigConnectionDraining_update_timeout,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr(
|
||||
|
@ -568,7 +568,7 @@ func TestAccAWSELBUpdate_ConnectionDraining(t *testing.T) {
|
|||
),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigConnectionDraining_update_disable,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr(
|
||||
|
@ -587,7 +587,7 @@ func TestAccAWSELB_SecurityGroups(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
// ELBs get a default security group
|
||||
|
@ -596,7 +596,7 @@ func TestAccAWSELB_SecurityGroups(t *testing.T) {
|
|||
),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigSecurityGroups,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
// Count should still be one as we swap in a custom security group
|
||||
|
|
|
@ -0,0 +1,94 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
func resourceAwsIamAccountAlias() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceAwsIamAccountAliasCreate,
|
||||
Read: resourceAwsIamAccountAliasRead,
|
||||
Delete: resourceAwsIamAccountAliasDelete,
|
||||
|
||||
Importer: &schema.ResourceImporter{
|
||||
State: schema.ImportStatePassthrough,
|
||||
},
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"account_alias": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: validateAccountAlias,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAwsIamAccountAliasCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AWSClient).iamconn
|
||||
|
||||
account_alias := d.Get("account_alias").(string)
|
||||
|
||||
params := &iam.CreateAccountAliasInput{
|
||||
AccountAlias: aws.String(account_alias),
|
||||
}
|
||||
|
||||
_, err := conn.CreateAccountAlias(params)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating account alias with name %s", account_alias)
|
||||
}
|
||||
|
||||
d.SetId(account_alias)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAwsIamAccountAliasRead(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AWSClient).iamconn
|
||||
|
||||
params := &iam.ListAccountAliasesInput{}
|
||||
|
||||
resp, err := conn.ListAccountAliases(params)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp == nil || len(resp.AccountAliases) == 0 {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
account_alias := aws.StringValue(resp.AccountAliases[0])
|
||||
|
||||
d.SetId(account_alias)
|
||||
d.Set("account_alias", account_alias)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAwsIamAccountAliasDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AWSClient).iamconn
|
||||
|
||||
account_alias := d.Get("account_alias").(string)
|
||||
|
||||
params := &iam.DeleteAccountAliasInput{
|
||||
AccountAlias: aws.String(account_alias),
|
||||
}
|
||||
|
||||
_, err := conn.DeleteAccountAlias(params)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error deleting account alias with name %s", account_alias)
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
|
||||
return nil
|
||||
}
|
|
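Note: an AWS account carries at most one alias, which is why the read function above lists aliases and takes the first element as the resource ID. A small standalone sketch of the same `ListAccountAliases` call; credentials and region are assumed to come from the environment:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	conn := iam.New(session.Must(session.NewSession()))

	// An account has at most one alias, so the resource reads index 0.
	resp, err := conn.ListAccountAliases(&iam.ListAccountAliasesInput{})
	if err != nil {
		log.Fatal(err)
	}
	if len(resp.AccountAliases) == 0 {
		fmt.Println("no account alias set")
		return
	}
	fmt.Println("current alias:", aws.StringValue(resp.AccountAliases[0]))
}
```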
@ -0,0 +1,91 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccAWSIAMAccountAlias_basic(t *testing.T) {
|
||||
var account_alias string
|
||||
|
||||
rstring := acctest.RandString(5)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSIAMAccountAliasDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSIAMAccountAliasConfig(rstring),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSIAMAccountAliasExists("aws_iam_account_alias.test", &account_alias),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckAWSIAMAccountAliasDestroy(s *terraform.State) error {
|
||||
conn := testAccProvider.Meta().(*AWSClient).iamconn
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "aws_iam_account_alias" {
|
||||
continue
|
||||
}
|
||||
|
||||
params := &iam.ListAccountAliasesInput{}
|
||||
|
||||
resp, err := conn.ListAccountAliases(params)
|
||||
|
||||
if err != nil || resp == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(resp.AccountAliases) > 0 {
|
||||
return fmt.Errorf("Bad: Account alias still exists: %q", rs.Primary.ID)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func testAccCheckAWSIAMAccountAliasExists(n string, a *string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
conn := testAccProvider.Meta().(*AWSClient).iamconn
|
||||
params := &iam.ListAccountAliasesInput{}
|
||||
|
||||
resp, err := conn.ListAccountAliases(params)
|
||||
|
||||
if err != nil || resp == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(resp.AccountAliases) == 0 {
|
||||
return fmt.Errorf("Bad: Account alias %q does not exist", rs.Primary.ID)
|
||||
}
|
||||
|
||||
*a = aws.StringValue(resp.AccountAliases[0])
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccAWSIAMAccountAliasConfig(rstring string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_iam_account_alias" "test" {
|
||||
account_alias = "terraform-%s-alias"
|
||||
}
|
||||
`, rstring)
|
||||
}
|
|
@ -2,10 +2,12 @@ package aws
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
|
@ -70,6 +72,11 @@ func resourceAwsIamSamlProviderRead(d *schema.ResourceData, meta interface{}) er
|
|||
}
|
||||
out, err := iamconn.GetSAMLProvider(input)
|
||||
if err != nil {
|
||||
if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" {
|
||||
log.Printf("[WARN] IAM SAML Provider %q not found.", d.Id())
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
|
@ -548,7 +548,7 @@ func resourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error {
|
|||
}
|
||||
|
||||
if err := d.Set("ipv6_addresses", ipv6Addresses); err != nil {
|
||||
log.Printf("[WARN] Error setting ipv6_addresses for AWS Instance (%d): %s", d.Id(), err)
|
||||
log.Printf("[WARN] Error setting ipv6_addresses for AWS Instance (%s): %s", d.Id(), err)
|
||||
}
|
||||
|
||||
d.Set("ebs_optimized", instance.EbsOptimized)
|
||||
|
@@ -1034,10 +1034,15 @@ func readBlockDeviceMappingsFromConfig(
 
 		if v, ok := bd["volume_type"].(string); ok && v != "" {
 			ebs.VolumeType = aws.String(v)
-		}
-
-		if v, ok := bd["iops"].(int); ok && v > 0 {
-			ebs.Iops = aws.Int64(int64(v))
+			if "io1" == strings.ToLower(v) {
+				// Condition: This parameter is required for requests to create io1
+				// volumes; it is not used in requests to create gp2, st1, sc1, or
+				// standard volumes.
+				// See: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html
+				if v, ok := bd["iops"].(int); ok && v > 0 {
+					ebs.Iops = aws.Int64(int64(v))
+				}
+			}
 		}
 
 		blockDevices = append(blockDevices, &ec2.BlockDeviceMapping{

@@ -1060,7 +1060,6 @@ resource "aws_instance" "foo" {
   root_block_device {
     volume_type = "gp2"
     volume_size = 11
-    iops = 330
   }
 }
 `
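Note: after this change the provider only forwards `iops` for io1 volumes, matching the EbsBlockDevice note quoted in the comment, and the gp2 test config above drops its stray `iops` argument accordingly. A minimal sketch of the two shapes of request this produces; the sizes and IOPS values are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Provisioned-IOPS volume: Iops is meaningful and gets sent.
	io1 := &ec2.EbsBlockDevice{
		VolumeType: aws.String("io1"),
		VolumeSize: aws.Int64(100),
		Iops:       aws.Int64(1000),
	}

	// General-purpose volume: Iops is left unset, matching the new behaviour.
	gp2 := &ec2.EbsBlockDevice{
		VolumeType: aws.String("gp2"),
		VolumeSize: aws.Int64(11),
	}

	fmt.Println(io1, gp2)
}
```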
@@ -7,12 +7,14 @@ import (

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/lambda"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)

func TestAccAWSLambdaAlias_basic(t *testing.T) {
var conf lambda.AliasConfiguration
rInt := acctest.RandInt()

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },

@@ -20,7 +22,7 @@ func TestAccAWSLambdaAlias_basic(t *testing.T) {
CheckDestroy: testAccCheckAwsLambdaAliasDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAwsLambdaAliasConfig,
Config: testAccAwsLambdaAliasConfig(rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckAwsLambdaAliasExists("aws_lambda_alias.lambda_alias_test", &conf),
testAccCheckAwsLambdaAttributes(&conf),

@@ -95,9 +97,10 @@ func testAccCheckAwsLambdaAttributes(mapping *lambda.AliasConfiguration) resourc
}
}

const testAccAwsLambdaAliasConfig = `
func testAccAwsLambdaAliasConfig(rInt int) string {
return fmt.Sprintf(`
resource "aws_iam_role" "iam_for_lambda" {
name = "iam_for_lambda"
name = "iam_for_lambda_%d"

assume_role_policy = <<EOF
{

@@ -117,7 +120,7 @@ EOF
}

resource "aws_iam_policy" "policy_for_role" {
name = "policy_for_role"
name = "policy_for_role_%d"
path = "/"
description = "IAM policy for for Lamda alias testing"

@@ -138,7 +141,7 @@ EOF
}

resource "aws_iam_policy_attachment" "policy_attachment_for_role" {
name = "policy_attachment_for_role"
name = "policy_attachment_for_role_%d"
roles = ["${aws_iam_role.iam_for_lambda.name}"]
policy_arn = "${aws_iam_policy.policy_for_role.arn}"
}

@@ -156,5 +159,5 @@ resource "aws_lambda_alias" "lambda_alias_test" {
description = "a sample description"
function_name = "${aws_lambda_function.lambda_function_test_create.arn}"
function_version = "$LATEST"
}`, rInt, rInt, rInt)
}
`
@@ -389,9 +389,9 @@ func resourceAwsLambdaFunctionRead(d *schema.ResourceData, meta interface{}) err
last := p.Versions[len(p.Versions)-1]
lastVersion = *last.Version
lastQualifiedArn = *last.FunctionArn
return true
return false
}
return false
return true
})
if err != nil {
return err

@@ -416,6 +416,7 @@ func listVersionsByFunctionPages(c *lambda.Lambda, input *lambda.ListVersionsByF
if !shouldContinue || lastPage {
break
}
input.Marker = page.NextMarker
}
return nil
}
@@ -292,6 +292,8 @@ func TestAccAWSLambdaFunction_localUpdate(t *testing.T) {
}
defer os.Remove(path)

rInt := acctest.RandInt()

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,

@@ -301,7 +303,7 @@ func TestAccAWSLambdaFunction_localUpdate(t *testing.T) {
PreConfig: func() {
testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func.js": "lambda.js"}, zipFile)
},
Config: genAWSLambdaFunctionConfig_local(path),
Config: genAWSLambdaFunctionConfig_local(path, rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_local", "tf_acc_lambda_name_local", &conf),
testAccCheckAwsLambdaFunctionName(&conf, "tf_acc_lambda_name_local"),

@@ -313,7 +315,7 @@ func TestAccAWSLambdaFunction_localUpdate(t *testing.T) {
PreConfig: func() {
testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func_modified.js": "lambda.js"}, zipFile)
},
Config: genAWSLambdaFunctionConfig_local(path),
Config: genAWSLambdaFunctionConfig_local(path, rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_local", "tf_acc_lambda_name_local", &conf),
testAccCheckAwsLambdaFunctionName(&conf, "tf_acc_lambda_name_local"),

@@ -387,6 +389,8 @@ func TestAccAWSLambdaFunction_s3Update(t *testing.T) {
bucketName := fmt.Sprintf("tf-acc-lambda-s3-deployments-%d", randomInteger)
key := "lambda-func.zip"

rInt := acctest.RandInt()

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,

@@ -397,7 +401,7 @@ func TestAccAWSLambdaFunction_s3Update(t *testing.T) {
// Upload 1st version
testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func.js": "lambda.js"}, zipFile)
},
Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path),
Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path, rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_s3", "tf_acc_lambda_name_s3", &conf),
testAccCheckAwsLambdaFunctionName(&conf, "tf_acc_lambda_name_s3"),

@@ -411,12 +415,12 @@ func TestAccAWSLambdaFunction_s3Update(t *testing.T) {
// Upload 2nd version
testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func_modified.js": "lambda.js"}, zipFile)
},
Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path),
Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path, rInt),
},
// Extra step because of missing ComputedWhen
// See https://github.com/hashicorp/terraform/pull/4846 & https://github.com/hashicorp/terraform/pull/5330
{
Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path),
Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path, rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_s3", "tf_acc_lambda_name_s3", &conf),
testAccCheckAwsLambdaFunctionName(&conf, "tf_acc_lambda_name_s3"),

@@ -1101,7 +1105,7 @@ resource "aws_lambda_function" "lambda_function_test" {

const testAccAWSLambdaFunctionConfig_local_tpl = `
resource "aws_iam_role" "iam_for_lambda" {
name = "iam_for_lambda"
name = "iam_for_lambda_%d"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",

@@ -1128,8 +1132,8 @@ resource "aws_lambda_function" "lambda_function_local" {
}
`

func genAWSLambdaFunctionConfig_local(filePath string) string {
return fmt.Sprintf(testAccAWSLambdaFunctionConfig_local_tpl,
func genAWSLambdaFunctionConfig_local(filePath string, rInt int) string {
return fmt.Sprintf(testAccAWSLambdaFunctionConfig_local_tpl, rInt,
filePath, filePath)
}

@@ -1182,7 +1186,7 @@ resource "aws_s3_bucket_object" "o" {
etag = "${md5(file("%s"))}"
}
resource "aws_iam_role" "iam_for_lambda" {
name = "iam_for_lambda"
name = "iam_for_lambda_%d"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",

@@ -1210,9 +1214,9 @@ resource "aws_lambda_function" "lambda_function_s3" {
}
`

func genAWSLambdaFunctionConfig_s3(bucket, key, path string) string {
func genAWSLambdaFunctionConfig_s3(bucket, key, path string, rInt int) string {
return fmt.Sprintf(testAccAWSLambdaFunctionConfig_s3_tpl,
bucket, key, path, path)
bucket, key, path, path, rInt)
}

func testAccAWSLambdaFunctionConfig_s3_unversioned_tpl(rName, bucketName, key, path string) string {
@@ -28,20 +28,20 @@ func resourceAwsNetworkAcl() *schema.Resource {
},

Schema: map[string]*schema.Schema{
"vpc_id": &schema.Schema{
"vpc_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Computed: false,
},
"subnet_id": &schema.Schema{
"subnet_id": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Computed: false,
Deprecated: "Attribute subnet_id is deprecated on network_acl resources. Use subnet_ids instead",
},
"subnet_ids": &schema.Schema{
"subnet_ids": {
Type: schema.TypeSet,
Optional: true,
Computed: true,

@@ -49,42 +49,46 @@ func resourceAwsNetworkAcl() *schema.Resource {
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"ingress": &schema.Schema{
"ingress": {
Type: schema.TypeSet,
Required: false,
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"from_port": &schema.Schema{
"from_port": {
Type: schema.TypeInt,
Required: true,
},
"to_port": &schema.Schema{
"to_port": {
Type: schema.TypeInt,
Required: true,
},
"rule_no": &schema.Schema{
"rule_no": {
Type: schema.TypeInt,
Required: true,
},
"action": &schema.Schema{
"action": {
Type: schema.TypeString,
Required: true,
},
"protocol": &schema.Schema{
"protocol": {
Type: schema.TypeString,
Required: true,
},
"cidr_block": &schema.Schema{
"cidr_block": {
Type: schema.TypeString,
Optional: true,
},
"icmp_type": &schema.Schema{
"ipv6_cidr_block": {
Type: schema.TypeString,
Optional: true,
},
"icmp_type": {
Type: schema.TypeInt,
Optional: true,
},
"icmp_code": &schema.Schema{
"icmp_code": {
Type: schema.TypeInt,
Optional: true,
},

@@ -92,42 +96,46 @@ func resourceAwsNetworkAcl() *schema.Resource {
},
Set: resourceAwsNetworkAclEntryHash,
},
"egress": &schema.Schema{
"egress": {
Type: schema.TypeSet,
Required: false,
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"from_port": &schema.Schema{
"from_port": {
Type: schema.TypeInt,
Required: true,
},
"to_port": &schema.Schema{
"to_port": {
Type: schema.TypeInt,
Required: true,
},
"rule_no": &schema.Schema{
"rule_no": {
Type: schema.TypeInt,
Required: true,
},
"action": &schema.Schema{
"action": {
Type: schema.TypeString,
Required: true,
},
"protocol": &schema.Schema{
"protocol": {
Type: schema.TypeString,
Required: true,
},
"cidr_block": &schema.Schema{
"cidr_block": {
Type: schema.TypeString,
Optional: true,
},
"icmp_type": &schema.Schema{
"ipv6_cidr_block": {
Type: schema.TypeString,
Optional: true,
},
"icmp_type": {
Type: schema.TypeInt,
Optional: true,
},
"icmp_code": &schema.Schema{
"icmp_code": {
Type: schema.TypeInt,
Optional: true,
},

@@ -389,25 +397,36 @@ func updateNetworkAclEntries(d *schema.ResourceData, entryType string, conn *ec2
}
}

// AWS mutates the CIDR block into a network implied by the IP and
// mask provided. This results in hashing inconsistencies between
// the local config file and the state returned by the API. Error
// if the user provides a CIDR block with an inappropriate mask
if err := validateCIDRBlock(*add.CidrBlock); err != nil {
return err
if add.CidrBlock != nil {
// AWS mutates the CIDR block into a network implied by the IP and
// mask provided. This results in hashing inconsistencies between
// the local config file and the state returned by the API. Error
// if the user provides a CIDR block with an inappropriate mask
if err := validateCIDRBlock(*add.CidrBlock); err != nil {
return err
}
}

// Add new Acl entry
_, connErr := conn.CreateNetworkAclEntry(&ec2.CreateNetworkAclEntryInput{
createOpts := &ec2.CreateNetworkAclEntryInput{
NetworkAclId: aws.String(d.Id()),
CidrBlock: add.CidrBlock,
Egress: add.Egress,
PortRange: add.PortRange,
Protocol: add.Protocol,
RuleAction: add.RuleAction,
RuleNumber: add.RuleNumber,
IcmpTypeCode: add.IcmpTypeCode,
})
}

if add.CidrBlock != nil {
createOpts.CidrBlock = add.CidrBlock
}

if add.Ipv6CidrBlock != nil {
createOpts.Ipv6CidrBlock = add.Ipv6CidrBlock
}

// Add new Acl entry
_, connErr := conn.CreateNetworkAclEntry(createOpts)
if connErr != nil {
return fmt.Errorf("Error creating %s entry: %s", entryType, connErr)
}

@@ -520,7 +539,13 @@ func resourceAwsNetworkAclEntryHash(v interface{}) int {
buf.WriteString(fmt.Sprintf("%s-", protocol))
}

buf.WriteString(fmt.Sprintf("%s-", m["cidr_block"].(string)))
if v, ok := m["cidr_block"]; ok {
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
}

if v, ok := m["ipv6_cidr_block"]; ok {
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
}

if v, ok := m["ssl_certificate_id"]; ok {
buf.WriteString(fmt.Sprintf("%s-", v.(string)))

@@ -539,11 +564,11 @@ func resourceAwsNetworkAclEntryHash(v interface{}) int {
func getDefaultNetworkAcl(vpc_id string, conn *ec2.EC2) (defaultAcl *ec2.NetworkAcl, err error) {
resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{
Filters: []*ec2.Filter{
&ec2.Filter{
{
Name: aws.String("default"),
Values: []*string{aws.String("true")},
},
&ec2.Filter{
{
Name: aws.String("vpc-id"),
Values: []*string{aws.String(vpc_id)},
},

@@ -559,7 +584,7 @@ func getDefaultNetworkAcl(vpc_id string, conn *ec2.EC2) (defaultAcl *ec2.Network
func findNetworkAclAssociation(subnetId string, conn *ec2.EC2) (networkAclAssociation *ec2.NetworkAclAssociation, err error) {
resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{
Filters: []*ec2.Filter{
&ec2.Filter{
{
Name: aws.String("association.subnet-id"),
Values: []*string{aws.String(subnetId)},
},

@@ -587,8 +612,12 @@ func networkAclEntriesToMapList(networkAcls []*ec2.NetworkAclEntry) []map[string
acl := make(map[string]interface{})
acl["rule_no"] = *entry.RuleNumber
acl["action"] = *entry.RuleAction
acl["cidr_block"] = *entry.CidrBlock

if entry.CidrBlock != nil {
acl["cidr_block"] = *entry.CidrBlock
}
if entry.Ipv6CidrBlock != nil {
acl["ipv6_cidr_block"] = *entry.Ipv6CidrBlock
}
// The AWS network ACL API only speaks protocol numbers, and
// that's all we record.
if _, err := strconv.Atoi(*entry.Protocol); err != nil {
@@ -21,54 +21,59 @@ func resourceAwsNetworkAclRule() *schema.Resource {
Delete: resourceAwsNetworkAclRuleDelete,

Schema: map[string]*schema.Schema{
"network_acl_id": &schema.Schema{
"network_acl_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"rule_number": &schema.Schema{
"rule_number": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
},
"egress": &schema.Schema{
"egress": {
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
Default: false,
},
"protocol": &schema.Schema{
"protocol": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"rule_action": &schema.Schema{
"rule_action": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"cidr_block": &schema.Schema{
"cidr_block": {
Type: schema.TypeString,
Required: true,
Optional: true,
ForceNew: true,
},
"from_port": &schema.Schema{
"ipv6_cidr_block": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"from_port": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
},
"to_port": &schema.Schema{
"to_port": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
},
"icmp_type": &schema.Schema{
"icmp_type": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validateICMPArgumentValue,
},
"icmp_code": &schema.Schema{
"icmp_code": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,

@@ -97,7 +102,6 @@ func resourceAwsNetworkAclRuleCreate(d *schema.ResourceData, meta interface{}) e
Egress: aws.Bool(d.Get("egress").(bool)),
RuleNumber: aws.Int64(int64(d.Get("rule_number").(int))),
Protocol: aws.String(strconv.Itoa(p)),
CidrBlock: aws.String(d.Get("cidr_block").(string)),
RuleAction: aws.String(d.Get("rule_action").(string)),
PortRange: &ec2.PortRange{
From: aws.Int64(int64(d.Get("from_port").(int))),

@@ -105,6 +109,14 @@ func resourceAwsNetworkAclRuleCreate(d *schema.ResourceData, meta interface{}) e
},
}

if v, ok := d.GetOk("cidr_block"); ok {
params.CidrBlock = aws.String(v.(string))
}

if v, ok := d.GetOk("ipv6_cidr_block"); ok {
params.Ipv6CidrBlock = aws.String(v.(string))
}

// Specify additional required fields for ICMP. For the list
// of ICMP codes and types, see: http://www.nthelp.com/icmp.html
if p == 1 {

@@ -160,6 +172,7 @@ func resourceAwsNetworkAclRuleRead(d *schema.ResourceData, meta interface{}) err

d.Set("rule_number", resp.RuleNumber)
d.Set("cidr_block", resp.CidrBlock)
d.Set("ipv6_cidr_block", resp.Ipv6CidrBlock)
d.Set("egress", resp.Egress)
if resp.IcmpTypeCode != nil {
d.Set("icmp_code", resp.IcmpTypeCode.Code)
@@ -20,7 +20,7 @@ func TestAccAWSNetworkAclRule_basic(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSNetworkAclRuleDestroy,
Steps: []resource.TestStep{
resource.TestStep{
{
Config: testAccAWSNetworkAclRuleBasicConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSNetworkAclRuleExists("aws_network_acl_rule.baz", &networkAcl),

@@ -32,6 +32,24 @@ func TestAccAWSNetworkAclRule_basic(t *testing.T) {
})
}

func TestAccAWSNetworkAclRule_ipv6(t *testing.T) {
var networkAcl ec2.NetworkAcl

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSNetworkAclRuleDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSNetworkAclRuleIpv6Config,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSNetworkAclRuleExists("aws_network_acl_rule.baz", &networkAcl),
),
},
},
})
}

func TestResourceAWSNetworkAclRule_validateICMPArgumentValue(t *testing.T) {
type testCases struct {
Value string

@@ -195,3 +213,23 @@ resource "aws_network_acl_rule" "wibble" {
icmp_code = -1
}
`

const testAccAWSNetworkAclRuleIpv6Config = `
resource "aws_vpc" "foo" {
cidr_block = "10.3.0.0/16"
}
resource "aws_network_acl" "bar" {
vpc_id = "${aws_vpc.foo.id}"
}
resource "aws_network_acl_rule" "baz" {
network_acl_id = "${aws_network_acl.bar.id}"
rule_number = 150
egress = false
protocol = "tcp"
rule_action = "allow"
ipv6_cidr_block = "::/0"
from_port = 22
to_port = 22
}

`
@@ -20,34 +20,34 @@ func TestAccAWSNetworkAcl_EgressAndIngressRules(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSNetworkAclDestroy,
Steps: []resource.TestStep{
resource.TestStep{
{
Config: testAccAWSNetworkAclEgressNIngressConfig,
Check: resource.ComposeTestCheckFunc(
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckAWSNetworkAclExists("aws_network_acl.bar", &networkAcl),
resource.TestCheckResourceAttr(
"aws_network_acl.bar", "ingress.109047673.protocol", "6"),
"aws_network_acl.bar", "ingress.1871939009.protocol", "6"),
resource.TestCheckResourceAttr(
"aws_network_acl.bar", "ingress.109047673.rule_no", "1"),
"aws_network_acl.bar", "ingress.1871939009.rule_no", "1"),
resource.TestCheckResourceAttr(
"aws_network_acl.bar", "ingress.109047673.from_port", "80"),
"aws_network_acl.bar", "ingress.1871939009.from_port", "80"),
resource.TestCheckResourceAttr(
"aws_network_acl.bar", "ingress.109047673.to_port", "80"),
"aws_network_acl.bar", "ingress.1871939009.to_port", "80"),
resource.TestCheckResourceAttr(
"aws_network_acl.bar", "ingress.109047673.action", "allow"),
"aws_network_acl.bar", "ingress.1871939009.action", "allow"),
resource.TestCheckResourceAttr(
"aws_network_acl.bar", "ingress.109047673.cidr_block", "10.3.0.0/18"),
"aws_network_acl.bar", "ingress.1871939009.cidr_block", "10.3.0.0/18"),
resource.TestCheckResourceAttr(
"aws_network_acl.bar", "egress.868403673.protocol", "6"),
"aws_network_acl.bar", "egress.3111164687.protocol", "6"),
resource.TestCheckResourceAttr(
"aws_network_acl.bar", "egress.868403673.rule_no", "2"),
"aws_network_acl.bar", "egress.3111164687.rule_no", "2"),
resource.TestCheckResourceAttr(
"aws_network_acl.bar", "egress.868403673.from_port", "443"),
"aws_network_acl.bar", "egress.3111164687.from_port", "443"),
resource.TestCheckResourceAttr(
"aws_network_acl.bar", "egress.868403673.to_port", "443"),
"aws_network_acl.bar", "egress.3111164687.to_port", "443"),
resource.TestCheckResourceAttr(
"aws_network_acl.bar", "egress.868403673.cidr_block", "10.3.0.0/18"),
"aws_network_acl.bar", "egress.3111164687.cidr_block", "10.3.0.0/18"),
resource.TestCheckResourceAttr(
"aws_network_acl.bar", "egress.868403673.action", "allow"),
"aws_network_acl.bar", "egress.3111164687.action", "allow"),
),
},
},

@@ -63,23 +63,22 @@ func TestAccAWSNetworkAcl_OnlyIngressRules_basic(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSNetworkAclDestroy,
Steps: []resource.TestStep{
resource.TestStep{
{
Config: testAccAWSNetworkAclIngressConfig,
Check: resource.ComposeTestCheckFunc(
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckAWSNetworkAclExists("aws_network_acl.foos", &networkAcl),
// testAccCheckSubnetAssociation("aws_network_acl.foos", "aws_subnet.blob"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.1451312565.protocol", "6"),
"aws_network_acl.foos", "ingress.4245812720.protocol", "6"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.1451312565.rule_no", "2"),
"aws_network_acl.foos", "ingress.4245812720.rule_no", "2"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.1451312565.from_port", "443"),
"aws_network_acl.foos", "ingress.4245812720.from_port", "443"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.1451312565.to_port", "443"),
"aws_network_acl.foos", "ingress.4245812720.to_port", "443"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.1451312565.action", "deny"),
"aws_network_acl.foos", "ingress.4245812720.action", "deny"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.1451312565.cidr_block", "10.2.0.0/18"),
"aws_network_acl.foos", "ingress.4245812720.cidr_block", "10.2.0.0/18"),
),
},
},

@@ -95,46 +94,46 @@ func TestAccAWSNetworkAcl_OnlyIngressRules_update(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSNetworkAclDestroy,
Steps: []resource.TestStep{
resource.TestStep{
{
Config: testAccAWSNetworkAclIngressConfig,
Check: resource.ComposeTestCheckFunc(
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckAWSNetworkAclExists("aws_network_acl.foos", &networkAcl),
testIngressRuleLength(&networkAcl, 2),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.2048097841.protocol", "6"),
"aws_network_acl.foos", "ingress.401088754.protocol", "6"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.2048097841.rule_no", "1"),
"aws_network_acl.foos", "ingress.401088754.rule_no", "1"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.2048097841.from_port", "0"),
"aws_network_acl.foos", "ingress.401088754.from_port", "0"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.2048097841.to_port", "22"),
"aws_network_acl.foos", "ingress.401088754.to_port", "22"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.2048097841.action", "deny"),
"aws_network_acl.foos", "ingress.401088754.action", "deny"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.1451312565.cidr_block", "10.2.0.0/18"),
"aws_network_acl.foos", "ingress.4245812720.cidr_block", "10.2.0.0/18"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.1451312565.from_port", "443"),
"aws_network_acl.foos", "ingress.4245812720.from_port", "443"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.1451312565.rule_no", "2"),
"aws_network_acl.foos", "ingress.4245812720.rule_no", "2"),
),
},
resource.TestStep{
{
Config: testAccAWSNetworkAclIngressConfigChange,
Check: resource.ComposeTestCheckFunc(
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckAWSNetworkAclExists("aws_network_acl.foos", &networkAcl),
testIngressRuleLength(&networkAcl, 1),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.2048097841.protocol", "6"),
"aws_network_acl.foos", "ingress.401088754.protocol", "6"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.2048097841.rule_no", "1"),
"aws_network_acl.foos", "ingress.401088754.rule_no", "1"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.2048097841.from_port", "0"),
"aws_network_acl.foos", "ingress.401088754.from_port", "0"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.2048097841.to_port", "22"),
"aws_network_acl.foos", "ingress.401088754.to_port", "22"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.2048097841.action", "deny"),
"aws_network_acl.foos", "ingress.401088754.action", "deny"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.2048097841.cidr_block", "10.2.0.0/18"),
"aws_network_acl.foos", "ingress.401088754.cidr_block", "10.2.0.0/18"),
),
},
},

@@ -150,7 +149,7 @@ func TestAccAWSNetworkAcl_OnlyEgressRules(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSNetworkAclDestroy,
Steps: []resource.TestStep{
resource.TestStep{
{
Config: testAccAWSNetworkAclEgressConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSNetworkAclExists("aws_network_acl.bond", &networkAcl),

@@ -169,13 +168,13 @@ func TestAccAWSNetworkAcl_SubnetChange(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSNetworkAclDestroy,
Steps: []resource.TestStep{
resource.TestStep{
{
Config: testAccAWSNetworkAclSubnetConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckSubnetIsAssociatedWithAcl("aws_network_acl.bar", "aws_subnet.old"),
),
},
resource.TestStep{
{
Config: testAccAWSNetworkAclSubnetConfigChange,
Check: resource.ComposeTestCheckFunc(
testAccCheckSubnetIsNotAssociatedWithAcl("aws_network_acl.bar", "aws_subnet.old"),

@@ -206,7 +205,7 @@ func TestAccAWSNetworkAcl_Subnets(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSNetworkAclDestroy,
Steps: []resource.TestStep{
resource.TestStep{
{
Config: testAccAWSNetworkAclSubnet_SubnetIds,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSNetworkAclExists("aws_network_acl.bar", &networkAcl),

@@ -216,7 +215,7 @@ func TestAccAWSNetworkAcl_Subnets(t *testing.T) {
),
},

resource.TestStep{
{
Config: testAccAWSNetworkAclSubnet_SubnetIdsUpdate,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSNetworkAclExists("aws_network_acl.bar", &networkAcl),

@@ -230,6 +229,37 @@ func TestAccAWSNetworkAcl_Subnets(t *testing.T) {
})
}

func TestAccAWSNetworkAcl_ipv6Rules(t *testing.T) {
var networkAcl ec2.NetworkAcl

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
IDRefreshName: "aws_network_acl.foos",
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSNetworkAclDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSNetworkAclIpv6Config,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSNetworkAclExists("aws_network_acl.foos", &networkAcl),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.1976110835.protocol", "6"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.1976110835.rule_no", "1"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.1976110835.from_port", "0"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.1976110835.to_port", "22"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.1976110835.action", "allow"),
resource.TestCheckResourceAttr(
"aws_network_acl.foos", "ingress.1976110835.ipv6_cidr_block", "::/0"),
),
},
},
})
}

func TestAccAWSNetworkAcl_espProtocol(t *testing.T) {
var networkAcl ec2.NetworkAcl

@@ -239,7 +269,7 @@ func TestAccAWSNetworkAcl_espProtocol(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSNetworkAclDestroy,
Steps: []resource.TestStep{
resource.TestStep{
{
Config: testAccAWSNetworkAclEsp,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSNetworkAclExists("aws_network_acl.testesp", &networkAcl),

@@ -336,7 +366,7 @@ func testAccCheckSubnetIsAssociatedWithAcl(acl string, sub string) resource.Test
resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{
NetworkAclIds: []*string{aws.String(networkAcl.Primary.ID)},
Filters: []*ec2.Filter{
&ec2.Filter{
{
Name: aws.String("association.subnet-id"),
Values: []*string{aws.String(subnet.Primary.ID)},
},

@@ -362,7 +392,7 @@ func testAccCheckSubnetIsNotAssociatedWithAcl(acl string, subnet string) resourc
resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{
NetworkAclIds: []*string{aws.String(networkAcl.Primary.ID)},
Filters: []*ec2.Filter{
&ec2.Filter{
{
Name: aws.String("association.subnet-id"),
Values: []*string{aws.String(subnet.Primary.ID)},
},

@@ -379,6 +409,33 @@ func testAccCheckSubnetIsNotAssociatedWithAcl(acl string, subnet string) resourc
}
}

const testAccAWSNetworkAclIpv6Config = `
resource "aws_vpc" "foo" {
cidr_block = "10.1.0.0/16"
tags {
Name = "TestAccAWSNetworkAcl_ipv6Rules"
}
}
resource "aws_subnet" "blob" {
cidr_block = "10.1.1.0/24"
vpc_id = "${aws_vpc.foo.id}"
map_public_ip_on_launch = true
}
resource "aws_network_acl" "foos" {
vpc_id = "${aws_vpc.foo.id}"
ingress = {
protocol = "tcp"
rule_no = 1
action = "allow"
ipv6_cidr_block = "::/0"
from_port = 0
to_port = 22
}

subnet_ids = ["${aws_subnet.blob.id}"]
}
`

const testAccAWSNetworkAclIngressConfig = `
resource "aws_vpc" "foo" {
cidr_block = "10.1.0.0/16"
@@ -21,21 +21,21 @@ func resourceAwsOpsworksApplication() *schema.Resource {
Update: resourceAwsOpsworksApplicationUpdate,
Delete: resourceAwsOpsworksApplicationDelete,
Schema: map[string]*schema.Schema{
"id": &schema.Schema{
"id": {
Type: schema.TypeString,
Computed: true,
},
"name": &schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
},
"short_name": &schema.Schema{
"short_name": {
Type: schema.TypeString,
Computed: true,
Optional: true,
},
// aws-flow-ruby | java | rails | php | nodejs | static | other
"type": &schema.Schema{
"type": {
Type: schema.TypeString,
Required: true,
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {

@@ -56,62 +56,62 @@ func resourceAwsOpsworksApplication() *schema.Resource {
return
},
},
"stack_id": &schema.Schema{
"stack_id": {
Type: schema.TypeString,
Required: true,
},
// TODO: the following 4 vals are really part of the Attributes array. We should validate that only ones relevant to the chosen type are set, perhaps. (what is the default type? how do they map?)
"document_root": &schema.Schema{
"document_root": {
Type: schema.TypeString,
Optional: true,
//Default: "public",
},
"rails_env": &schema.Schema{
"rails_env": {
Type: schema.TypeString,
Optional: true,
//Default: "production",
},
"auto_bundle_on_deploy": &schema.Schema{
"auto_bundle_on_deploy": {
Type: schema.TypeString,
Optional: true,
//Default: true,
},
"aws_flow_ruby_settings": &schema.Schema{
"aws_flow_ruby_settings": {
Type: schema.TypeString,
Optional: true,
},
"app_source": &schema.Schema{
"app_source": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"type": &schema.Schema{
"type": {
Type: schema.TypeString,
Required: true,
},

"url": &schema.Schema{
"url": {
Type: schema.TypeString,
Optional: true,
},

"username": &schema.Schema{
"username": {
Type: schema.TypeString,
Optional: true,
},

"password": &schema.Schema{
"password": {
Type: schema.TypeString,
Optional: true,
},

"revision": &schema.Schema{
"revision": {
Type: schema.TypeString,
Optional: true,
},

"ssh_key": &schema.Schema{
"ssh_key": {
Type: schema.TypeString,
Optional: true,
},

@@ -121,41 +121,41 @@ func resourceAwsOpsworksApplication() *schema.Resource {
// AutoSelectOpsworksMysqlInstance, OpsworksMysqlInstance, or RdsDbInstance.
// anything beside auto select will lead into failure in case the instance doesn't exist
// XXX: validation?
"data_source_type": &schema.Schema{
"data_source_type": {
Type: schema.TypeString,
Optional: true,
},
"data_source_database_name": &schema.Schema{
"data_source_database_name": {
Type: schema.TypeString,
Optional: true,
},
"data_source_arn": &schema.Schema{
"data_source_arn": {
Type: schema.TypeString,
Optional: true,
},
"description": &schema.Schema{
"description": {
Type: schema.TypeString,
Optional: true,
},
"domains": &schema.Schema{
"domains": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"environment": &schema.Schema{
"environment": {
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"key": &schema.Schema{
"key": {
Type: schema.TypeString,
Required: true,
},
"value": &schema.Schema{
"value": {
Type: schema.TypeString,
Required: true,
},
"secure": &schema.Schema{
"secure": {
Type: schema.TypeBool,
Optional: true,
Default: true,

@@ -163,18 +163,18 @@ func resourceAwsOpsworksApplication() *schema.Resource {
},
},
},
"enable_ssl": &schema.Schema{
"enable_ssl": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"ssl_configuration": &schema.Schema{
"ssl_configuration": {
Type: schema.TypeList,
Optional: true,
//Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"certificate": &schema.Schema{
"certificate": {
Type: schema.TypeString,
Required: true,
StateFunc: func(v interface{}) string {

@@ -186,7 +186,7 @@ func resourceAwsOpsworksApplication() *schema.Resource {
}
},
},
"private_key": &schema.Schema{
"private_key": {
Type: schema.TypeString,
Required: true,
StateFunc: func(v interface{}) string {

@@ -198,7 +198,7 @@ func resourceAwsOpsworksApplication() *schema.Resource {
}
},
},
"chain": &schema.Schema{
"chain": {
Type: schema.TypeString,
Optional: true,
StateFunc: func(v interface{}) string {
@@ -8,25 +8,30 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/opsworks"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)

func TestAccAWSOpsworksApplication(t *testing.T) {
var opsapp opsworks.App

rInt := acctest.RandInt()
name := fmt.Sprintf("tf-ops-acc-application-%d", rInt)

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAwsOpsworksApplicationDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAwsOpsworksApplicationCreate,
{
Config: testAccAwsOpsworksApplicationCreate(name),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSOpsworksApplicationExists(
"aws_opsworks_application.tf-acc-app", &opsapp),
testAccCheckAWSOpsworksCreateAppAttributes(&opsapp),
resource.TestCheckResourceAttr(
"aws_opsworks_application.tf-acc-app", "name", "tf-ops-acc-application",
"aws_opsworks_application.tf-acc-app", "name", name,
),
resource.TestCheckResourceAttr(
"aws_opsworks_application.tf-acc-app", "type", "other",

@@ -34,14 +39,14 @@ func TestAccAWSOpsworksApplication(t *testing.T) {
resource.TestCheckResourceAttr(
"aws_opsworks_application.tf-acc-app", "enable_ssl", "false",
),
resource.TestCheckResourceAttr(
"aws_opsworks_application.tf-acc-app", "ssl_configuration", "",
resource.TestCheckNoResourceAttr(
"aws_opsworks_application.tf-acc-app", "ssl_configuration",
),
resource.TestCheckResourceAttr(
"aws_opsworks_application.tf-acc-app", "domains", "",
resource.TestCheckNoResourceAttr(
"aws_opsworks_application.tf-acc-app", "domains",
),
resource.TestCheckResourceAttr(
"aws_opsworks_application.tf-acc-app", "app_source", "",
resource.TestCheckNoResourceAttr(
"aws_opsworks_application.tf-acc-app", "app_source",
),
resource.TestCheckResourceAttr(
"aws_opsworks_application.tf-acc-app", "environment.3077298702.key", "key1",

@@ -49,22 +54,22 @@ func TestAccAWSOpsworksApplication(t *testing.T) {
resource.TestCheckResourceAttr(
"aws_opsworks_application.tf-acc-app", "environment.3077298702.value", "value1",
),
resource.TestCheckResourceAttr(
"aws_opsworks_application.tf-acc-app", "environment.3077298702.secret", "",
resource.TestCheckNoResourceAttr(
"aws_opsworks_application.tf-acc-app", "environment.3077298702.secret",
),
resource.TestCheckResourceAttr(
"aws_opsworks_application.tf-acc-app", "document_root", "foo",
),
),
},
resource.TestStep{
Config: testAccAwsOpsworksApplicationUpdate,
{
Config: testAccAwsOpsworksApplicationUpdate(name),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSOpsworksApplicationExists(
"aws_opsworks_application.tf-acc-app", &opsapp),
testAccCheckAWSOpsworksUpdateAppAttributes(&opsapp),
resource.TestCheckResourceAttr(
"aws_opsworks_application.tf-acc-app", "name", "tf-ops-acc-application",
"aws_opsworks_application.tf-acc-app", "name", name,
),
resource.TestCheckResourceAttr(
"aws_opsworks_application.tf-acc-app", "type", "rails",

@@ -117,8 +122,8 @@ func TestAccAWSOpsworksApplication(t *testing.T) {
resource.TestCheckResourceAttr(
"aws_opsworks_application.tf-acc-app", "environment.3077298702.value", "value1",
),
resource.TestCheckResourceAttr(
"aws_opsworks_application.tf-acc-app", "environment.3077298702.secret", "",
resource.TestCheckNoResourceAttr(
"aws_opsworks_application.tf-acc-app", "environment.3077298702.secret",
),
resource.TestCheckResourceAttr(
"aws_opsworks_application.tf-acc-app", "document_root", "root",

@@ -188,7 +193,7 @@ func testAccCheckAWSOpsworksCreateAppAttributes(
}

expectedEnv := []*opsworks.EnvironmentVariable{
&opsworks.EnvironmentVariable{
{
Key: aws.String("key1"),
Value: aws.String("value1"),
Secure: aws.Bool(false),

@@ -248,12 +253,12 @@ func testAccCheckAWSOpsworksUpdateAppAttributes(
}

expectedEnv := []*opsworks.EnvironmentVariable{
&opsworks.EnvironmentVariable{
{
Key: aws.String("key2"),
Value: aws.String("*****FILTERED*****"),
Secure: aws.Bool(true),
},
&opsworks.EnvironmentVariable{
{
Key: aws.String("key1"),
Value: aws.String("value1"),
Secure: aws.Bool(false),

@@ -308,10 +313,12 @@ func testAccCheckAwsOpsworksApplicationDestroy(s *terraform.State) error {
return nil
}

var testAccAwsOpsworksApplicationCreate = testAccAwsOpsworksStackConfigVpcCreate("tf-ops-acc-application") + `
func testAccAwsOpsworksApplicationCreate(name string) string {
return testAccAwsOpsworksStackConfigVpcCreate(name) +
fmt.Sprintf(`
resource "aws_opsworks_application" "tf-acc-app" {
stack_id = "${aws_opsworks_stack.tf-acc.id}"
name = "tf-ops-acc-application"
name = "%s"
type = "other"
enable_ssl = false
app_source ={

@@ -320,12 +327,15 @@ resource "aws_opsworks_application" "tf-acc-app" {
environment = { key = "key1" value = "value1" secure = false}
document_root = "foo"
}
`
`, name)
}

var testAccAwsOpsworksApplicationUpdate = testAccAwsOpsworksStackConfigVpcCreate("tf-ops-acc-application") + `
func testAccAwsOpsworksApplicationUpdate(name string) string {
return testAccAwsOpsworksStackConfigVpcCreate(name) +
fmt.Sprintf(`
resource "aws_opsworks_application" "tf-acc-app" {
stack_id = "${aws_opsworks_stack.tf-acc.id}"
name = "tf-ops-acc-application"
name = "%s"
type = "rails"
domains = ["example.com", "sub.example.com"]
enable_ssl = true

@@ -372,4 +382,5 @@ EOS
auto_bundle_on_deploy = "true"
rails_env = "staging"
}
`
`, name)
}
@@ -24,7 +24,7 @@ func TestAccAWSOpsworksCustomLayer(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testAccCheckAwsOpsworksCustomLayerDestroy,
Steps: []resource.TestStep{
resource.TestStep{
{
Config: testAccAwsOpsworksCustomLayerConfigNoVpcCreate(stackName),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSOpsworksCustomLayerExists(

@@ -74,7 +74,7 @@ func TestAccAWSOpsworksCustomLayer(t *testing.T) {
),
),
},
resource.TestStep{
{
Config: testAccAwsOpsworksCustomLayerConfigUpdate(stackName),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(

@@ -219,7 +219,7 @@ func testAccCheckAWSOpsworksCreateLayerAttributes(
}

expectedEbsVolumes := []*opsworks.VolumeConfiguration{
&opsworks.VolumeConfiguration{
{
VolumeType: aws.String("gp2"),
NumberOfDisks: aws.Int64(2),
MountPoint: aws.String("/home"),
@@ -10,17 +10,17 @@ func resourceAwsOpsworksGangliaLayer() *schema.Resource {
DefaultLayerName: "Ganglia",

Attributes: map[string]*opsworksLayerTypeAttribute{
"url": &opsworksLayerTypeAttribute{
"url": {
AttrName: "GangliaUrl",
Type: schema.TypeString,
Default: "/ganglia",
},
"username": &opsworksLayerTypeAttribute{
"username": {
AttrName: "GangliaUser",
Type: schema.TypeString,
Default: "opsworks",
},
"password": &opsworksLayerTypeAttribute{
"password": {
AttrName: "GangliaPassword",
Type: schema.TypeString,
Required: true,
@@ -10,33 +10,33 @@ func resourceAwsOpsworksHaproxyLayer() *schema.Resource {
DefaultLayerName: "HAProxy",

Attributes: map[string]*opsworksLayerTypeAttribute{
"stats_enabled": &opsworksLayerTypeAttribute{
"stats_enabled": {
AttrName: "EnableHaproxyStats",
Type: schema.TypeBool,
Default: true,
},
"stats_url": &opsworksLayerTypeAttribute{
"stats_url": {
AttrName: "HaproxyStatsUrl",
Type: schema.TypeString,
Default: "/haproxy?stats",
},
"stats_user": &opsworksLayerTypeAttribute{
"stats_user": {
AttrName: "HaproxyStatsUser",
Type: schema.TypeString,
Default: "opsworks",
},
"stats_password": &opsworksLayerTypeAttribute{
"stats_password": {
AttrName: "HaproxyStatsPassword",
Type: schema.TypeString,
WriteOnly: true,
Required: true,
},
"healthcheck_url": &opsworksLayerTypeAttribute{
"healthcheck_url": {
AttrName: "HaproxyHealthCheckUrl",
Type: schema.TypeString,
Default: "/",
},
"healthcheck_method": &opsworksLayerTypeAttribute{
"healthcheck_method": {
AttrName: "HaproxyHealthCheckMethod",
Type: schema.TypeString,
Default: "OPTIONS",
@@ -71,7 +71,7 @@ func TestAccAWSOpsworksInstance(t *testing.T) {
"aws_opsworks_instance.tf-acc", "tenancy", "default",
),
resource.TestCheckResourceAttr(
"aws_opsworks_instance.tf-acc", "os", "Amazon Linux 2014.09", // inherited from opsworks_stack_test
"aws_opsworks_instance.tf-acc", "os", "Amazon Linux 2016.09", // inherited from opsworks_stack_test
),
resource.TestCheckResourceAttr(
"aws_opsworks_instance.tf-acc", "root_device_type", "ebs", // inherited from opsworks_stack_test
@@ -10,27 +10,27 @@ func resourceAwsOpsworksJavaAppLayer() *schema.Resource {
DefaultLayerName: "Java App Server",

Attributes: map[string]*opsworksLayerTypeAttribute{
"jvm_type": &opsworksLayerTypeAttribute{
"jvm_type": {
AttrName: "Jvm",
Type: schema.TypeString,
Default: "openjdk",
},
"jvm_version": &opsworksLayerTypeAttribute{
"jvm_version": {
AttrName: "JvmVersion",
Type: schema.TypeString,
Default: "7",
},
"jvm_options": &opsworksLayerTypeAttribute{
"jvm_options": {
AttrName: "JvmOptions",
Type: schema.TypeString,
Default: "",
},
"app_server": &opsworksLayerTypeAttribute{
"app_server": {
AttrName: "JavaAppServer",
Type: schema.TypeString,
Default: "tomcat",
},
"app_server_version": &opsworksLayerTypeAttribute{
"app_server_version": {
AttrName: "JavaAppServerVersion",
Type: schema.TypeString,
Default: "7",
@@ -10,7 +10,7 @@ func resourceAwsOpsworksMemcachedLayer() *schema.Resource {
DefaultLayerName: "Memcached",

Attributes: map[string]*opsworksLayerTypeAttribute{
"allocated_memory": &opsworksLayerTypeAttribute{
"allocated_memory": {
AttrName: "MemcachedMemory",
Type: schema.TypeInt,
Default: 512,
@@ -10,12 +10,12 @@ func resourceAwsOpsworksMysqlLayer() *schema.Resource {
DefaultLayerName: "MySQL",

Attributes: map[string]*opsworksLayerTypeAttribute{
"root_password": &opsworksLayerTypeAttribute{
"root_password": {
AttrName: "MysqlRootPassword",
Type: schema.TypeString,
WriteOnly: true,
},
"root_password_on_all_instances": &opsworksLayerTypeAttribute{
"root_password_on_all_instances": {
AttrName: "MysqlRootPasswordUbiquitous",
Type: schema.TypeBool,
Default: true,
@@ -10,7 +10,7 @@ func resourceAwsOpsworksNodejsAppLayer() *schema.Resource {
DefaultLayerName: "Node.js App Server",

Attributes: map[string]*opsworksLayerTypeAttribute{
"nodejs_version": &opsworksLayerTypeAttribute{
"nodejs_version": {
AttrName: "NodejsVersion",
Type: schema.TypeString,
Default: "0.10.38",
@@ -20,26 +20,26 @@ func resourceAwsOpsworksPermission() *schema.Resource {
Read: resourceAwsOpsworksPermissionRead,

Schema: map[string]*schema.Schema{
"id": &schema.Schema{
"id": {
Type: schema.TypeString,
Computed: true,
},
"allow_ssh": &schema.Schema{
"allow_ssh": {
Type: schema.TypeBool,
Computed: true,
Optional: true,
},
"allow_sudo": &schema.Schema{
"allow_sudo": {
Type: schema.TypeBool,
Computed: true,
Optional: true,
},
"user_arn": &schema.Schema{
"user_arn": {
Type: schema.TypeString,
Required: true,
},
// one of deny, show, deploy, manage, iam_only
"level": &schema.Schema{
"level": {
Type: schema.TypeString,
Computed: true,
Optional: true,

@@ -61,7 +61,7 @@ func resourceAwsOpsworksPermission() *schema.Resource {
return
},
},
"stack_id": &schema.Schema{
"stack_id": {
Type: schema.TypeString,
Computed: true,
Optional: true,
@@ -20,7 +20,7 @@ func TestAccAWSOpsworksPermission(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testAccCheckAwsOpsworksPermissionDestroy,
Steps: []resource.TestStep{
resource.TestStep{
{
Config: testAccAwsOpsworksPermissionCreate(sName, "true", "true", "iam_only"),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSOpsworksPermissionExists(

@@ -37,7 +37,7 @@ func TestAccAWSOpsworksPermission(t *testing.T) {
),
),
},
resource.TestStep{
{
Config: testAccAwsOpsworksPermissionCreate(sName, "true", "false", "iam_only"),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSOpsworksPermissionExists(

@@ -54,7 +54,7 @@ func TestAccAWSOpsworksPermission(t *testing.T) {
),
),
},
resource.TestStep{
{
Config: testAccAwsOpsworksPermissionCreate(sName, "false", "false", "deny"),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSOpsworksPermissionExists(

@@ -71,7 +71,7 @@ func TestAccAWSOpsworksPermission(t *testing.T) {
),
),
},
resource.TestStep{
{
Config: testAccAwsOpsworksPermissionCreate(sName, "false", "false", "show"),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSOpsworksPermissionExists(
@@ -10,32 +10,32 @@ func resourceAwsOpsworksRailsAppLayer() *schema.Resource {
DefaultLayerName: "Rails App Server",

Attributes: map[string]*opsworksLayerTypeAttribute{
"ruby_version": &opsworksLayerTypeAttribute{
"ruby_version": {
AttrName: "RubyVersion",
Type: schema.TypeString,
Default: "2.0.0",
},
"app_server": &opsworksLayerTypeAttribute{
"app_server": {
AttrName: "RailsStack",
Type: schema.TypeString,
Default: "apache_passenger",
},
"passenger_version": &opsworksLayerTypeAttribute{
"passenger_version": {
AttrName: "PassengerVersion",
Type: schema.TypeString,
Default: "4.0.46",
},
"rubygems_version": &opsworksLayerTypeAttribute{
"rubygems_version": {
AttrName: "RubygemsVersion",
Type: schema.TypeString,
Default: "2.2.2",
},
"manage_bundler": &opsworksLayerTypeAttribute{
"manage_bundler": {
AttrName: "ManageBundler",
Type: schema.TypeBool,
Default: true,
},
"bundler_version": &opsworksLayerTypeAttribute{
"bundler_version": {
AttrName: "BundlerVersion",
Type: schema.TypeString,
Default: "1.5.3",
@@ -22,7 +22,7 @@ func TestAccAWSOpsworksRailsAppLayer(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testAccCheckAwsOpsworksRailsAppLayerDestroy,
Steps: []resource.TestStep{
resource.TestStep{
{
Config: testAccAwsOpsworksRailsAppLayerConfigVpcCreate(stackName),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(

@@ -33,7 +33,7 @@ func TestAccAWSOpsworksRailsAppLayer(t *testing.T) {
),
),
},
resource.TestStep{
{
Config: testAccAwsOpsworksRailsAppLayerNoManageBundlerConfigVpcCreate(stackName),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(
@@ -20,26 +20,26 @@ func resourceAwsOpsworksRdsDbInstance() *schema.Resource {
Read: resourceAwsOpsworksRdsDbInstanceRead,

Schema: map[string]*schema.Schema{
"id": &schema.Schema{
"id": {
Type: schema.TypeString,
Computed: true,
},
"stack_id": &schema.Schema{
"stack_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"rds_db_instance_arn": &schema.Schema{
"rds_db_instance_arn": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"db_password": &schema.Schema{
"db_password": {
Type: schema.TypeString,
Required: true,
Sensitive: true,
},
"db_user": &schema.Schema{
"db_user": {
Type: schema.TypeString,
Required: true,
},
@@ -20,7 +20,7 @@ func TestAccAWSOpsworksRdsDbInstance(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testAccCheckAwsOpsworksRdsDbDestroy,
Steps: []resource.TestStep{
resource.TestStep{
{
Config: testAccAwsOpsworksRdsDbInstance(sName, "foo", "barbarbarbar"),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSOpsworksRdsDbExists(

@@ -31,7 +31,7 @@ func TestAccAWSOpsworksRdsDbInstance(t *testing.T) {
),
),
},
resource.TestStep{
{
Config: testAccAwsOpsworksRdsDbInstance(sName, "bar", "barbarbarbar"),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSOpsworksRdsDbExists(

@@ -42,7 +42,7 @@ func TestAccAWSOpsworksRdsDbInstance(t *testing.T) {
),
),
},
resource.TestStep{
{
Config: testAccAwsOpsworksRdsDbInstance(sName, "bar", "foofoofoofoofoo"),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSOpsworksRdsDbExists(

@@ -53,7 +53,7 @@ func TestAccAWSOpsworksRdsDbInstance(t *testing.T) {
),
),
},
resource.TestStep{
{
Config: testAccAwsOpsworksRdsDbInstanceForceNew(sName),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSOpsworksRdsDbExists(
@ -25,99 +25,99 @@ func resourceAwsOpsworksStack() *schema.Resource {
|
|||
},
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"agent_version": &schema.Schema{
|
||||
"agent_version": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"id": &schema.Schema{
|
||||
"id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"name": &schema.Schema{
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"region": &schema.Schema{
|
||||
"region": {
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"service_role_arn": &schema.Schema{
|
||||
"service_role_arn": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"default_instance_profile_arn": &schema.Schema{
|
||||
"default_instance_profile_arn": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"color": &schema.Schema{
|
||||
"color": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"configuration_manager_name": &schema.Schema{
|
||||
"configuration_manager_name": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "Chef",
|
||||
},
|
||||
|
||||
"configuration_manager_version": &schema.Schema{
|
||||
"configuration_manager_version": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "11.4",
|
||||
},
|
||||
|
||||
"manage_berkshelf": &schema.Schema{
|
||||
"manage_berkshelf": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: false,
|
||||
},
|
||||
|
||||
"berkshelf_version": &schema.Schema{
|
||||
"berkshelf_version": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "3.2.0",
|
||||
},
|
||||
|
||||
"custom_cookbooks_source": &schema.Schema{
|
||||
"custom_cookbooks_source": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"type": &schema.Schema{
|
||||
"type": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"url": &schema.Schema{
|
||||
"url": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"username": &schema.Schema{
|
||||
"username": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"password": &schema.Schema{
|
||||
"password": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"revision": &schema.Schema{
|
||||
"revision": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"ssh_key": &schema.Schema{
|
||||
"ssh_key": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
@ -125,58 +125,58 @@ func resourceAwsOpsworksStack() *schema.Resource {
|
|||
},
|
||||
},
|
||||
|
||||
"custom_json": &schema.Schema{
|
||||
"custom_json": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"default_availability_zone": &schema.Schema{
|
||||
"default_availability_zone": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"default_os": &schema.Schema{
|
||||
"default_os": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "Ubuntu 12.04 LTS",
|
||||
},
|
||||
|
||||
"default_root_device_type": &schema.Schema{
|
||||
"default_root_device_type": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "instance-store",
|
||||
},
|
||||
|
||||
"default_ssh_key_name": &schema.Schema{
|
||||
"default_ssh_key_name": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"default_subnet_id": &schema.Schema{
|
||||
"default_subnet_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"hostname_theme": &schema.Schema{
|
||||
"hostname_theme": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "Layer_Dependent",
|
||||
},
|
||||
|
||||
"use_custom_cookbooks": &schema.Schema{
|
||||
"use_custom_cookbooks": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: false,
|
||||
},
|
||||
|
||||
"use_opsworks_security_groups": &schema.Schema{
|
||||
"use_opsworks_security_groups": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: true,
|
||||
},
|
||||
|
||||
"vpc_id": &schema.Schema{
|
||||
"vpc_id": {
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Optional: true,
|
||||
|
|
|
@ -25,7 +25,7 @@ func TestAccAWSOpsworksStackNoVpc(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAwsOpsworksStackDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAwsOpsworksStackConfigNoVpcCreate(stackName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSOpsworksStackExists(
|
||||
|
@ -36,10 +36,6 @@ func TestAccAWSOpsworksStackNoVpc(t *testing.T) {
|
|||
"us-east-1a", stackName),
|
||||
),
|
||||
},
|
||||
// resource.TestStep{
|
||||
// Config: testAccAWSOpsworksStackConfigNoVpcUpdate(stackName),
|
||||
// Check: testAccAwsOpsworksStackCheckResourceAttrsUpdate("us-east-1c", stackName),
|
||||
// },
|
||||
},
|
||||
})
|
||||
}
|
||||
|
@ -52,7 +48,7 @@ func TestAccAWSOpsworksStackVpc(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAwsOpsworksStackDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAwsOpsworksStackConfigVpcCreate(stackName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSOpsworksStackExists(
|
||||
|
@ -63,7 +59,7 @@ func TestAccAWSOpsworksStackVpc(t *testing.T) {
|
|||
"us-west-2a", stackName),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSOpsworksStackConfigVpcUpdate(stackName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSOpsworksStackExists(
|
||||
|
@ -97,7 +93,7 @@ func testAccAwsOpsworksStackCheckResourceAttrsCreate(zone, stackName string) res
|
|||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_stack.tf-acc",
|
||||
"default_os",
|
||||
"Amazon Linux 2014.09",
|
||||
"Amazon Linux 2016.09",
|
||||
),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_stack.tf-acc",
|
||||
|
@ -137,7 +133,7 @@ func testAccAwsOpsworksStackCheckResourceAttrsUpdate(zone, stackName string) res
|
|||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_stack.tf-acc",
|
||||
"default_os",
|
||||
"Amazon Linux 2014.09",
|
||||
"Amazon Linux 2015.09",
|
||||
),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_stack.tf-acc",
|
||||
|
@ -240,7 +236,7 @@ func testAccCheckAWSOpsworksCreateStackAttributes(
|
|||
return fmt.Errorf("Unnexpected DefaultAvailabilityZone: %s", *opsstack.DefaultAvailabilityZone)
|
||||
}
|
||||
|
||||
if *opsstack.DefaultOs != "Amazon Linux 2014.09" {
|
||||
if *opsstack.DefaultOs != "Amazon Linux 2016.09" {
|
||||
return fmt.Errorf("Unnexpected stackName: %s", *opsstack.DefaultOs)
|
||||
}
|
||||
|
||||
|
@ -275,7 +271,7 @@ func testAccCheckAWSOpsworksUpdateStackAttributes(
|
|||
return fmt.Errorf("Unnexpected DefaultAvailabilityZone: %s", *opsstack.DefaultAvailabilityZone)
|
||||
}
|
||||
|
||||
if *opsstack.DefaultOs != "Amazon Linux 2014.09" {
|
||||
if *opsstack.DefaultOs != "Amazon Linux 2015.09" {
|
||||
return fmt.Errorf("Unnexpected stackName: %s", *opsstack.DefaultOs)
|
||||
}
|
||||
|
||||
|
@ -348,13 +344,16 @@ func testAccCheckAwsOpsworksStackDestroy(s *terraform.State) error {
|
|||
|
||||
func testAccAwsOpsworksStackConfigNoVpcCreate(name string) string {
|
||||
return fmt.Sprintf(`
|
||||
provider "aws" {
|
||||
region = "us-east-1"
|
||||
}
|
||||
resource "aws_opsworks_stack" "tf-acc" {
|
||||
name = "%s"
|
||||
region = "us-east-1"
|
||||
service_role_arn = "${aws_iam_role.opsworks_service.arn}"
|
||||
default_instance_profile_arn = "${aws_iam_instance_profile.opsworks_instance.arn}"
|
||||
default_availability_zone = "us-east-1a"
|
||||
default_os = "Amazon Linux 2014.09"
|
||||
default_os = "Amazon Linux 2016.09"
|
||||
default_root_device_type = "ebs"
|
||||
custom_json = "{\"key\": \"value\"}"
|
||||
configuration_manager_version = "11.10"
|
||||
|
@ -427,95 +426,6 @@ resource "aws_iam_instance_profile" "opsworks_instance" {
|
|||
}`, name, name, name, name, name)
|
||||
}
|
||||
|
||||
func testAccAWSOpsworksStackConfigNoVpcUpdate(name string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_opsworks_stack" "tf-acc" {
|
||||
name = "%s"
|
||||
region = "us-east-1"
|
||||
service_role_arn = "${aws_iam_role.opsworks_service.arn}"
|
||||
default_instance_profile_arn = "${aws_iam_instance_profile.opsworks_instance.arn}"
|
||||
default_availability_zone = "us-east-1a"
|
||||
default_os = "Amazon Linux 2014.09"
|
||||
default_root_device_type = "ebs"
|
||||
custom_json = "{\"key\": \"value\"}"
|
||||
configuration_manager_version = "11.10"
|
||||
use_opsworks_security_groups = false
|
||||
use_custom_cookbooks = true
|
||||
manage_berkshelf = true
|
||||
custom_cookbooks_source {
|
||||
type = "git"
|
||||
revision = "master"
|
||||
url = "https://github.com/aws/opsworks-example-cookbooks.git"
|
||||
username = "example"
|
||||
password = "example"
|
||||
}
|
||||
resource "aws_iam_role" "opsworks_service" {
|
||||
name = "%s_opsworks_service"
|
||||
assume_role_policy = <<EOT
|
||||
{
|
||||
"Version": "2008-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Sid": "",
|
||||
"Effect": "Allow",
|
||||
"Principal": {
|
||||
"Service": "opsworks.amazonaws.com"
|
||||
},
|
||||
"Action": "sts:AssumeRole"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOT
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy" "opsworks_service" {
|
||||
name = "%s_opsworks_service"
|
||||
role = "${aws_iam_role.opsworks_service.id}"
|
||||
policy = <<EOT
|
||||
{
|
||||
"Statement": [
|
||||
{
|
||||
"Action": [
|
||||
"ec2:*",
|
||||
"iam:PassRole",
|
||||
"cloudwatch:GetMetricStatistics",
|
||||
"elasticloadbalancing:*",
|
||||
"rds:*"
|
||||
],
|
||||
"Effect": "Allow",
|
||||
"Resource": ["*"]
|
||||
}
|
||||
]
|
||||
}
|
||||
EOT
|
||||
}
|
||||
|
||||
resource "aws_iam_role" "opsworks_instance" {
|
||||
name = "%s_opsworks_instance"
|
||||
assume_role_policy = <<EOT
|
||||
{
|
||||
"Version": "2008-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Sid": "",
|
||||
"Effect": "Allow",
|
||||
"Principal": {
|
||||
"Service": "ec2.amazonaws.com"
|
||||
},
|
||||
"Action": "sts:AssumeRole"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOT
|
||||
}
|
||||
|
||||
resource "aws_iam_instance_profile" "opsworks_instance" {
|
||||
name = "%s_opsworks_instance"
|
||||
roles = ["${aws_iam_role.opsworks_instance.name}"]
|
||||
}
|
||||
`, name, name, name, name, name)
|
||||
}
|
||||
|
||||
////////////////////////////
|
||||
//// Tests for the VPC case
|
||||
////////////////////////////
|
||||
|
@ -537,7 +447,7 @@ resource "aws_opsworks_stack" "tf-acc" {
|
|||
default_subnet_id = "${aws_subnet.tf-acc.id}"
|
||||
service_role_arn = "${aws_iam_role.opsworks_service.arn}"
|
||||
default_instance_profile_arn = "${aws_iam_instance_profile.opsworks_instance.arn}"
|
||||
default_os = "Amazon Linux 2014.09"
|
||||
default_os = "Amazon Linux 2016.09"
|
||||
default_root_device_type = "ebs"
|
||||
custom_json = "{\"key\": \"value\"}"
|
||||
configuration_manager_version = "11.10"
|
||||
|
@ -628,7 +538,7 @@ resource "aws_opsworks_stack" "tf-acc" {
|
|||
default_subnet_id = "${aws_subnet.tf-acc.id}"
|
||||
service_role_arn = "${aws_iam_role.opsworks_service.arn}"
|
||||
default_instance_profile_arn = "${aws_iam_instance_profile.opsworks_instance.arn}"
|
||||
default_os = "Amazon Linux 2014.09"
|
||||
default_os = "Amazon Linux 2015.09"
|
||||
default_root_device_type = "ebs"
|
||||
custom_json = "{\"key\": \"value\"}"
|
||||
configuration_manager_version = "11.10"
|
||||
|
|
|
@ -18,28 +18,29 @@ func resourceAwsOpsworksUserProfile() *schema.Resource {
|
|||
Delete: resourceAwsOpsworksUserProfileDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"id": &schema.Schema{
|
||||
"id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"user_arn": &schema.Schema{
|
||||
"user_arn": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"allow_self_management": &schema.Schema{
|
||||
"allow_self_management": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: false,
|
||||
},
|
||||
|
||||
"ssh_username": &schema.Schema{
|
||||
"ssh_username": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"ssh_public_key": &schema.Schema{
|
||||
"ssh_public_key": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
|
|
@ -14,14 +14,14 @@ import (
|
|||
|
||||
func TestAccAWSOpsworksUserProfile(t *testing.T) {
|
||||
rName := fmt.Sprintf("test-user-%d", acctest.RandInt())
|
||||
roleName := fmt.Sprintf("tf-ops-user-profile-%d", acctest.RandInt())
|
||||
updateRName := fmt.Sprintf("test-user-%d", acctest.RandInt())
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAwsOpsworksUserProfileDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccAwsOpsworksUserProfileCreate(rName, roleName),
|
||||
{
|
||||
Config: testAccAwsOpsworksUserProfileCreate(rName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSOpsworksUserProfileExists(
|
||||
"aws_opsworks_user_profile.user", rName),
|
||||
|
@ -36,6 +36,22 @@ func TestAccAWSOpsworksUserProfile(t *testing.T) {
|
|||
),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAwsOpsworksUserProfileUpdate(rName, updateRName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSOpsworksUserProfileExists(
|
||||
"aws_opsworks_user_profile.user", updateRName),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_user_profile.user", "ssh_public_key", "",
|
||||
),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_user_profile.user", "ssh_username", updateRName,
|
||||
),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_user_profile.user", "allow_self_management", "false",
|
||||
),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
@ -114,7 +130,7 @@ func testAccCheckAwsOpsworksUserProfileDestroy(s *terraform.State) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func testAccAwsOpsworksUserProfileCreate(rn, roleName string) string {
|
||||
func testAccAwsOpsworksUserProfileCreate(rn string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_opsworks_user_profile" "user" {
|
||||
user_arn = "${aws_iam_user.user.arn}"
|
||||
|
@ -125,7 +141,24 @@ resource "aws_iam_user" "user" {
|
|||
name = "%s"
|
||||
path = "/"
|
||||
}
|
||||
|
||||
%s
|
||||
`, rn, testAccAwsOpsworksStackConfigNoVpcCreate(roleName))
|
||||
`, rn)
|
||||
}
|
||||
|
||||
func testAccAwsOpsworksUserProfileUpdate(rn, updateRn string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_opsworks_user_profile" "user" {
|
||||
user_arn = "${aws_iam_user.new-user.arn}"
|
||||
ssh_username = "${aws_iam_user.new-user.name}"
|
||||
}
|
||||
|
||||
resource "aws_iam_user" "user" {
|
||||
name = "%s"
|
||||
path = "/"
|
||||
}
|
||||
|
||||
resource "aws_iam_user" "new-user" {
|
||||
name = "%s"
|
||||
path = "/"
|
||||
}
|
||||
`, rn, updateRn)
|
||||
}
|
||||
|
|
|
@@ -16,7 +16,7 @@ import (

 // How long to sleep if a limit-exceeded event happens
 var routeTargetValidationError = errors.New("Error: more than 1 target specified. Only 1 of gateway_id, " +
-"nat_gateway_id, instance_id, network_interface_id, route_table_id or " +
+"egress_only_gateway_id, nat_gateway_id, instance_id, network_interface_id, route_table_id or " +
 "vpc_peering_connection_id is allowed.")

 // AWS Route resource Schema declaration
@ -29,62 +29,73 @@ func resourceAwsRoute() *schema.Resource {
|
|||
Exists: resourceAwsRouteExists,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"destination_cidr_block": &schema.Schema{
|
||||
"destination_cidr_block": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"destination_ipv6_cidr_block": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"destination_prefix_list_id": &schema.Schema{
|
||||
"destination_prefix_list_id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"gateway_id": &schema.Schema{
|
||||
"gateway_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"nat_gateway_id": &schema.Schema{
|
||||
"egress_only_gateway_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"instance_id": &schema.Schema{
|
||||
"nat_gateway_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"instance_owner_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"network_interface_id": &schema.Schema{
|
||||
"instance_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"origin": &schema.Schema{
|
||||
"instance_owner_id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"state": &schema.Schema{
|
||||
"network_interface_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"origin": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"route_table_id": &schema.Schema{
|
||||
"state": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"route_table_id": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"vpc_peering_connection_id": &schema.Schema{
|
||||
"vpc_peering_connection_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
@@ -97,6 +108,7 @@ func resourceAwsRouteCreate(d *schema.ResourceData, meta interface{}) error {
 var numTargets int
 var setTarget string
 allowedTargets := []string{
+"egress_only_gateway_id",
 "gateway_id",
 "nat_gateway_id",
 "instance_id",
@@ -125,6 +137,12 @@ func resourceAwsRouteCreate(d *schema.ResourceData, meta interface{}) error {
 DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)),
 GatewayId: aws.String(d.Get("gateway_id").(string)),
 }
+case "egress_only_gateway_id":
+createOpts = &ec2.CreateRouteInput{
+RouteTableId: aws.String(d.Get("route_table_id").(string)),
+DestinationIpv6CidrBlock: aws.String(d.Get("destination_ipv6_cidr_block").(string)),
+EgressOnlyInternetGatewayId: aws.String(d.Get("egress_only_gateway_id").(string)),
+}
 case "nat_gateway_id":
 createOpts = &ec2.CreateRouteInput{
 RouteTableId: aws.String(d.Get("route_table_id").(string)),
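For context on allowedTargets above: the create and update paths (partially elided from this diff) count how many of these target attributes are non-empty and reject the configuration when more than one is set, which is what routeTargetValidationError reports. A standalone sketch of that counting pattern, with a plain map standing in for *schema.ResourceData and a made-up gateway ID:

package main

import (
	"errors"
	"fmt"
)

// errTooManyTargets mirrors the intent of routeTargetValidationError; the
// wording here is illustrative, not the provider's exact message.
var errTooManyTargets = errors.New("more than 1 target specified; only one target attribute is allowed")

// pickTarget counts how many of the allowed target attributes are set and
// returns the single chosen one, or an error when several are set at once.
func pickTarget(d map[string]string, allowed []string) (string, error) {
	var setTarget string
	var numTargets int
	for _, k := range allowed {
		if d[k] != "" {
			setTarget = k
			numTargets++
		}
	}
	if numTargets > 1 {
		return "", errTooManyTargets
	}
	return setTarget, nil
}

func main() {
	allowed := []string{"egress_only_gateway_id", "gateway_id", "nat_gateway_id",
		"instance_id", "network_interface_id", "vpc_peering_connection_id"}
	target, err := pickTarget(map[string]string{"gateway_id": "igw-0123456789abcdef0"}, allowed)
	fmt.Println(target, err) // gateway_id <nil>
}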
@@ -180,12 +198,25 @@ func resourceAwsRouteCreate(d *schema.ResourceData, meta interface{}) error {
 }

 var route *ec2.Route
-err = resource.Retry(2*time.Minute, func() *resource.RetryError {
-route, err = findResourceRoute(conn, d.Get("route_table_id").(string), d.Get("destination_cidr_block").(string))
-return resource.RetryableError(err)
-})
-if err != nil {
-return fmt.Errorf("Error finding route after creating it: %s", err)

+if v, ok := d.GetOk("destination_cidr_block"); ok {
+err = resource.Retry(2*time.Minute, func() *resource.RetryError {
+route, err = findResourceRoute(conn, d.Get("route_table_id").(string), v.(string), "")
+return resource.RetryableError(err)
+})
+if err != nil {
+return fmt.Errorf("Error finding route after creating it: %s", err)
+}
+}

+if v, ok := d.GetOk("destination_ipv6_cidr_block"); ok {
+err = resource.Retry(2*time.Minute, func() *resource.RetryError {
+route, err = findResourceRoute(conn, d.Get("route_table_id").(string), "", v.(string))
+return resource.RetryableError(err)
+})
+if err != nil {
+return fmt.Errorf("Error finding route after creating it: %s", err)
+}
+}

 d.SetId(routeIDHash(d, route))
@@ -197,7 +228,10 @@ func resourceAwsRouteRead(d *schema.ResourceData, meta interface{}) error {
 conn := meta.(*AWSClient).ec2conn
 routeTableId := d.Get("route_table_id").(string)

-route, err := findResourceRoute(conn, routeTableId, d.Get("destination_cidr_block").(string))
+destinationCidrBlock := d.Get("destination_cidr_block").(string)
+destinationIpv6CidrBlock := d.Get("destination_ipv6_cidr_block").(string)
+
+route, err := findResourceRoute(conn, routeTableId, destinationCidrBlock, destinationIpv6CidrBlock)
 if err != nil {
 if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidRouteTableID.NotFound" {
 log.Printf("[WARN] Route Table %q could not be found. Removing Route from state.",
@@ -214,6 +248,7 @@ func resourceAwsRouteRead(d *schema.ResourceData, meta interface{}) error {
 func resourceAwsRouteSetResourceData(d *schema.ResourceData, route *ec2.Route) {
 d.Set("destination_prefix_list_id", route.DestinationPrefixListId)
 d.Set("gateway_id", route.GatewayId)
+d.Set("egress_only_gateway_id", route.EgressOnlyInternetGatewayId)
 d.Set("nat_gateway_id", route.NatGatewayId)
 d.Set("instance_id", route.InstanceId)
 d.Set("instance_owner_id", route.InstanceOwnerId)
@@ -229,6 +264,7 @@ func resourceAwsRouteUpdate(d *schema.ResourceData, meta interface{}) error {
 var setTarget string

 allowedTargets := []string{
+"egress_only_gateway_id",
 "gateway_id",
 "nat_gateway_id",
 "network_interface_id",
@@ -267,6 +303,12 @@ func resourceAwsRouteUpdate(d *schema.ResourceData, meta interface{}) error {
 DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)),
 GatewayId: aws.String(d.Get("gateway_id").(string)),
 }
+case "egress_only_gateway_id":
+replaceOpts = &ec2.ReplaceRouteInput{
+RouteTableId: aws.String(d.Get("route_table_id").(string)),
+DestinationIpv6CidrBlock: aws.String(d.Get("destination_ipv6_cidr_block").(string)),
+EgressOnlyInternetGatewayId: aws.String(d.Get("egress_only_gateway_id").(string)),
+}
 case "nat_gateway_id":
 replaceOpts = &ec2.ReplaceRouteInput{
 RouteTableId: aws.String(d.Get("route_table_id").(string)),
@@ -309,8 +351,13 @@ func resourceAwsRouteDelete(d *schema.ResourceData, meta interface{}) error {
 conn := meta.(*AWSClient).ec2conn

 deleteOpts := &ec2.DeleteRouteInput{
-RouteTableId: aws.String(d.Get("route_table_id").(string)),
-DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)),
+RouteTableId: aws.String(d.Get("route_table_id").(string)),
 }
+if v, ok := d.GetOk("destination_cidr_block"); ok {
+deleteOpts.DestinationCidrBlock = aws.String(v.(string))
+}
+if v, ok := d.GetOk("destination_ipv6_cidr_block"); ok {
+deleteOpts.DestinationIpv6CidrBlock = aws.String(v.(string))
+}
 log.Printf("[DEBUG] Route delete opts: %s", deleteOpts)

@@ -368,10 +415,19 @@ func resourceAwsRouteExists(d *schema.ResourceData, meta interface{}) (bool, err
 return false, nil
 }

-cidr := d.Get("destination_cidr_block").(string)
-for _, route := range (*res.RouteTables[0]).Routes {
-if route.DestinationCidrBlock != nil && *route.DestinationCidrBlock == cidr {
-return true, nil
+if v, ok := d.GetOk("destination_cidr_block"); ok {
+for _, route := range (*res.RouteTables[0]).Routes {
+if route.DestinationCidrBlock != nil && *route.DestinationCidrBlock == v.(string) {
+return true, nil
+}
+}
+}
+
+if v, ok := d.GetOk("destination_ipv6_cidr_block"); ok {
+for _, route := range (*res.RouteTables[0]).Routes {
+if route.DestinationIpv6CidrBlock != nil && *route.DestinationIpv6CidrBlock == v.(string) {
+return true, nil
+}
+}
+}

@@ -380,11 +436,16 @@ func resourceAwsRouteExists(d *schema.ResourceData, meta interface{}) (bool, err

 // Create an ID for a route
 func routeIDHash(d *schema.ResourceData, r *ec2.Route) string {

+if r.DestinationIpv6CidrBlock != nil && *r.DestinationIpv6CidrBlock != "" {
+return fmt.Sprintf("r-%s%d", d.Get("route_table_id").(string), hashcode.String(*r.DestinationIpv6CidrBlock))
+}
+
 return fmt.Sprintf("r-%s%d", d.Get("route_table_id").(string), hashcode.String(*r.DestinationCidrBlock))
 }

 // Helper: retrieve a route
-func findResourceRoute(conn *ec2.EC2, rtbid string, cidr string) (*ec2.Route, error) {
+func findResourceRoute(conn *ec2.EC2, rtbid string, cidr string, ipv6cidr string) (*ec2.Route, error) {
 routeTableID := rtbid

 findOpts := &ec2.DescribeRouteTablesInput{
@@ -401,12 +462,29 @@ func findResourceRoute(conn *ec2.EC2, rtbid string, cidr string) (*ec2.Route, er
 routeTableID)
 }

-for _, route := range (*resp.RouteTables[0]).Routes {
-if route.DestinationCidrBlock != nil && *route.DestinationCidrBlock == cidr {
-return route, nil
+if cidr != "" {
+for _, route := range (*resp.RouteTables[0]).Routes {
+if route.DestinationCidrBlock != nil && *route.DestinationCidrBlock == cidr {
+return route, nil
+}
+}
+
+return nil, fmt.Errorf("Unable to find matching route for Route Table (%s) "+
+"and destination CIDR block (%s).", rtbid, cidr)
+}

-return nil, fmt.Errorf("Unable to find matching route for Route Table (%s) "+
-"and destination CIDR block (%s).", rtbid, cidr)
+if ipv6cidr != "" {
+for _, route := range (*resp.RouteTables[0]).Routes {
+if route.DestinationIpv6CidrBlock != nil && *route.DestinationIpv6CidrBlock == ipv6cidr {
+return route, nil
+}
+}
+
+return nil, fmt.Errorf("Unable to find matching route for Route Table (%s) "+
+"and destination IPv6 CIDR block (%s).", rtbid, ipv6cidr)
+}
+
+return nil, fmt.Errorf("When trying to find a matching route for Route Table %q "+
+"you need to specify a CIDR block of IPv6 CIDR Block", rtbid)

 }
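routeIDHash above keys the resource ID off whichever destination block is present, preferring the IPv6 block when both are set. A standalone sketch of that ID scheme; Terraform's hashcode.String helper is crc32-based, so crc32 is used directly here, and the route table ID is a made-up example rather than anything from this diff:

package main

import (
	"fmt"
	"hash/crc32"
)

// routeID mimics the "r-" + route table ID + hash-of-destination format used
// by routeIDHash; the IPv6 destination wins when both blocks are non-empty.
func routeID(routeTableID, cidr, ipv6cidr string) string {
	dest := cidr
	if ipv6cidr != "" {
		dest = ipv6cidr
	}
	return fmt.Sprintf("r-%s%d", routeTableID, crc32.ChecksumIEEE([]byte(dest)))
}

func main() {
	fmt.Println(routeID("rtb-0123456789abcdef0", "", "::/0"))
	fmt.Println(routeID("rtb-0123456789abcdef0", "10.0.1.0/24", ""))
}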
@ -25,7 +25,7 @@ func resourceAwsRouteTable() *schema.Resource {
|
|||
},
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"vpc_id": &schema.Schema{
|
||||
"vpc_id": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
|
@ -33,45 +33,55 @@ func resourceAwsRouteTable() *schema.Resource {
|
|||
|
||||
"tags": tagsSchema(),
|
||||
|
||||
"propagating_vgws": &schema.Schema{
|
||||
"propagating_vgws": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
|
||||
"route": &schema.Schema{
|
||||
"route": {
|
||||
Type: schema.TypeSet,
|
||||
Computed: true,
|
||||
Optional: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"cidr_block": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"gateway_id": &schema.Schema{
|
||||
"cidr_block": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"instance_id": &schema.Schema{
|
||||
"ipv6_cidr_block": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"nat_gateway_id": &schema.Schema{
|
||||
"egress_only_gateway_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"vpc_peering_connection_id": &schema.Schema{
|
||||
"gateway_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"network_interface_id": &schema.Schema{
|
||||
"instance_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"nat_gateway_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"vpc_peering_connection_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"network_interface_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
@ -166,6 +176,12 @@ func resourceAwsRouteTableRead(d *schema.ResourceData, meta interface{}) error {
|
|||
if r.DestinationCidrBlock != nil {
|
||||
m["cidr_block"] = *r.DestinationCidrBlock
|
||||
}
|
||||
if r.DestinationIpv6CidrBlock != nil {
|
||||
m["ipv6_cidr_block"] = *r.DestinationIpv6CidrBlock
|
||||
}
|
||||
if r.EgressOnlyInternetGatewayId != nil {
|
||||
m["egress_only_gateway_id"] = *r.EgressOnlyInternetGatewayId
|
||||
}
|
||||
if r.GatewayId != nil {
|
||||
m["gateway_id"] = *r.GatewayId
|
||||
}
|
||||
|
@ -266,14 +282,27 @@ func resourceAwsRouteTableUpdate(d *schema.ResourceData, meta interface{}) error
|
|||
for _, route := range ors.List() {
|
||||
m := route.(map[string]interface{})
|
||||
|
||||
// Delete the route as it no longer exists in the config
|
||||
log.Printf(
|
||||
"[INFO] Deleting route from %s: %s",
|
||||
d.Id(), m["cidr_block"].(string))
|
||||
_, err := conn.DeleteRoute(&ec2.DeleteRouteInput{
|
||||
RouteTableId: aws.String(d.Id()),
|
||||
DestinationCidrBlock: aws.String(m["cidr_block"].(string)),
|
||||
})
|
||||
deleteOpts := &ec2.DeleteRouteInput{
|
||||
RouteTableId: aws.String(d.Id()),
|
||||
}
|
||||
|
||||
if s := m["ipv6_cidr_block"].(string); s != "" {
|
||||
deleteOpts.DestinationIpv6CidrBlock = aws.String(s)
|
||||
|
||||
log.Printf(
|
||||
"[INFO] Deleting route from %s: %s",
|
||||
d.Id(), m["ipv6_cidr_block"].(string))
|
||||
}
|
||||
|
||||
if s := m["cidr_block"].(string); s != "" {
|
||||
deleteOpts.DestinationCidrBlock = aws.String(s)
|
||||
|
||||
log.Printf(
|
||||
"[INFO] Deleting route from %s: %s",
|
||||
d.Id(), m["cidr_block"].(string))
|
||||
}
|
||||
|
||||
_, err := conn.DeleteRoute(deleteOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -288,16 +317,39 @@ func resourceAwsRouteTableUpdate(d *schema.ResourceData, meta interface{}) error
|
|||
m := route.(map[string]interface{})
|
||||
|
||||
opts := ec2.CreateRouteInput{
|
||||
RouteTableId: aws.String(d.Id()),
|
||||
DestinationCidrBlock: aws.String(m["cidr_block"].(string)),
|
||||
GatewayId: aws.String(m["gateway_id"].(string)),
|
||||
InstanceId: aws.String(m["instance_id"].(string)),
|
||||
VpcPeeringConnectionId: aws.String(m["vpc_peering_connection_id"].(string)),
|
||||
NetworkInterfaceId: aws.String(m["network_interface_id"].(string)),
|
||||
RouteTableId: aws.String(d.Id()),
|
||||
}
|
||||
|
||||
if m["nat_gateway_id"].(string) != "" {
|
||||
opts.NatGatewayId = aws.String(m["nat_gateway_id"].(string))
|
||||
if s := m["vpc_peering_connection_id"].(string); s != "" {
|
||||
opts.VpcPeeringConnectionId = aws.String(s)
|
||||
}
|
||||
|
||||
if s := m["network_interface_id"].(string); s != "" {
|
||||
opts.NetworkInterfaceId = aws.String(s)
|
||||
}
|
||||
|
||||
if s := m["instance_id"].(string); s != "" {
|
||||
opts.InstanceId = aws.String(s)
|
||||
}
|
||||
|
||||
if s := m["ipv6_cidr_block"].(string); s != "" {
|
||||
opts.DestinationIpv6CidrBlock = aws.String(s)
|
||||
}
|
||||
|
||||
if s := m["cidr_block"].(string); s != "" {
|
||||
opts.DestinationCidrBlock = aws.String(s)
|
||||
}
|
||||
|
||||
if s := m["gateway_id"].(string); s != "" {
|
||||
opts.GatewayId = aws.String(s)
|
||||
}
|
||||
|
||||
if s := m["egress_only_gateway_id"].(string); s != "" {
|
||||
opts.EgressOnlyInternetGatewayId = aws.String(s)
|
||||
}
|
||||
|
||||
if s := m["nat_gateway_id"].(string); s != "" {
|
||||
opts.NatGatewayId = aws.String(s)
|
||||
}
|
||||
|
||||
log.Printf("[INFO] Creating route for %s: %#v", d.Id(), opts)
|
||||
|
@ -402,6 +454,10 @@ func resourceAwsRouteTableHash(v interface{}) int {
|
|||
var buf bytes.Buffer
|
||||
m := v.(map[string]interface{})
|
||||
|
||||
if v, ok := m["ipv6_cidr_block"]; ok {
|
||||
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
|
||||
}
|
||||
|
||||
if v, ok := m["cidr_block"]; ok {
|
||||
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
|
||||
}
|
||||
|
@ -410,6 +466,10 @@ func resourceAwsRouteTableHash(v interface{}) int {
|
|||
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
|
||||
}
|
||||
|
||||
if v, ok := m["egress_only_gateway_id"]; ok {
|
||||
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
|
||||
}
|
||||
|
||||
natGatewaySet := false
|
||||
if v, ok := m["nat_gateway_id"]; ok {
|
||||
natGatewaySet = v.(string) != ""
|
||||
|
|
|
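The route table update hunks above switch from always setting every field on CreateRouteInput to copying only the attributes that are non-empty, so IPv4-only, IPv6-only, and mixed route blocks all build valid requests. A standalone sketch of that pattern, with a local struct standing in for ec2.CreateRouteInput and made-up IDs:

package main

import "fmt"

// createRouteInput is a stand-in for ec2.CreateRouteInput; only the
// copy-when-non-empty pattern is illustrated here.
type createRouteInput struct {
	DestinationCidrBlock     string
	DestinationIpv6CidrBlock string
	GatewayId                string
	EgressOnlyGatewayId      string
}

func buildCreateInput(m map[string]interface{}) createRouteInput {
	var opts createRouteInput
	if s := m["cidr_block"].(string); s != "" {
		opts.DestinationCidrBlock = s
	}
	if s := m["ipv6_cidr_block"].(string); s != "" {
		opts.DestinationIpv6CidrBlock = s
	}
	if s := m["gateway_id"].(string); s != "" {
		opts.GatewayId = s
	}
	if s := m["egress_only_gateway_id"].(string); s != "" {
		opts.EgressOnlyGatewayId = s
	}
	return opts
}

func main() {
	route := map[string]interface{}{
		"cidr_block":             "",
		"ipv6_cidr_block":        "::/0",
		"gateway_id":             "",
		"egress_only_gateway_id": "eigw-0123456789abcdef0",
	}
	fmt.Printf("%+v\n", buildCreateInput(route))
}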
@ -63,7 +63,7 @@ func TestAccAWSRouteTable_basic(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckRouteTableDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccRouteTableConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckRouteTableExists(
|
||||
|
@ -72,7 +72,7 @@ func TestAccAWSRouteTable_basic(t *testing.T) {
|
|||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccRouteTableConfigChange,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckRouteTableExists(
|
||||
|
@ -113,7 +113,7 @@ func TestAccAWSRouteTable_instance(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckRouteTableDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccRouteTableConfigInstance,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckRouteTableExists(
|
||||
|
@ -125,6 +125,35 @@ func TestAccAWSRouteTable_instance(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestAccAWSRouteTable_ipv6(t *testing.T) {
|
||||
var v ec2.RouteTable
|
||||
|
||||
testCheck := func(*terraform.State) error {
|
||||
// Expect 3: 2 IPv6 (local + all outbound) + 1 IPv4
|
||||
if len(v.Routes) != 3 {
|
||||
return fmt.Errorf("bad routes: %#v", v.Routes)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
IDRefreshName: "aws_route_table.foo",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckRouteTableDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccRouteTableConfigIpv6,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckRouteTableExists("aws_route_table.foo", &v),
|
||||
testCheck,
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSRouteTable_tags(t *testing.T) {
|
||||
var route_table ec2.RouteTable
|
||||
|
||||
|
@ -134,7 +163,7 @@ func TestAccAWSRouteTable_tags(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckRouteTableDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccRouteTableConfigTags,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckRouteTableExists("aws_route_table.foo", &route_table),
|
||||
|
@ -142,7 +171,7 @@ func TestAccAWSRouteTable_tags(t *testing.T) {
|
|||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccRouteTableConfigTagsUpdate,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckRouteTableExists("aws_route_table.foo", &route_table),
|
||||
|
@ -244,7 +273,7 @@ func TestAccAWSRouteTable_vpcPeering(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckRouteTableDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccRouteTableVpcPeeringConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckRouteTableExists(
|
||||
|
@ -285,7 +314,7 @@ func TestAccAWSRouteTable_vgwRoutePropagation(t *testing.T) {
|
|||
testAccCheckRouteTableDestroy,
|
||||
),
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccRouteTableVgwRoutePropagationConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckRouteTableExists(
|
||||
|
@ -342,6 +371,26 @@ resource "aws_route_table" "foo" {
|
|||
}
|
||||
`
|
||||
|
||||
const testAccRouteTableConfigIpv6 = `
|
||||
resource "aws_vpc" "foo" {
|
||||
cidr_block = "10.1.0.0/16"
|
||||
assign_generated_ipv6_cidr_block = true
|
||||
}
|
||||
|
||||
resource "aws_egress_only_internet_gateway" "foo" {
|
||||
vpc_id = "${aws_vpc.foo.id}"
|
||||
}
|
||||
|
||||
resource "aws_route_table" "foo" {
|
||||
vpc_id = "${aws_vpc.foo.id}"
|
||||
|
||||
route {
|
||||
ipv6_cidr_block = "::/0"
|
||||
egress_only_gateway_id = "${aws_egress_only_internet_gateway.foo.id}"
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
const testAccRouteTableConfigInstance = `
|
||||
resource "aws_vpc" "foo" {
|
||||
cidr_block = "10.1.0.0/16"
|
||||
|
|
|
@ -38,7 +38,7 @@ func TestAccAWSRoute_basic(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSRouteDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSRouteBasicConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSRouteExists("aws_route.bar", &route),
|
||||
|
@ -49,6 +49,43 @@ func TestAccAWSRoute_basic(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestAccAWSRoute_ipv6Support(t *testing.T) {
|
||||
var route ec2.Route
|
||||
|
||||
//aws creates a default route
|
||||
testCheck := func(s *terraform.State) error {
|
||||
|
||||
name := "aws_egress_only_internet_gateway.foo"
|
||||
gwres, ok := s.RootModule().Resources[name]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s\n", name)
|
||||
}
|
||||
|
||||
if *route.EgressOnlyInternetGatewayId != gwres.Primary.ID {
|
||||
return fmt.Errorf("Egress Only Internet Gateway Id (Expected=%s, Actual=%s)\n", gwres.Primary.ID, *route.EgressOnlyInternetGatewayId)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() {
|
||||
testAccPreCheck(t)
|
||||
},
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSRouteDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSRouteConfigIpv6,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSRouteExists("aws_route.bar", &route),
|
||||
testCheck,
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSRoute_changeCidr(t *testing.T) {
|
||||
var route ec2.Route
|
||||
var routeTable ec2.RouteTable
|
||||
|
@ -101,14 +138,14 @@ func TestAccAWSRoute_changeCidr(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSRouteDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSRouteBasicConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSRouteExists("aws_route.bar", &route),
|
||||
testCheck,
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSRouteBasicConfigChangeCidr,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSRouteExists("aws_route.bar", &route),
|
||||
|
@ -139,14 +176,14 @@ func TestAccAWSRoute_noopdiff(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSRouteDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSRouteNoopChange,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSRouteExists("aws_route.test", &route),
|
||||
testCheck,
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSRouteNoopChange,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSRouteExists("aws_route.test", &route),
|
||||
|
@ -166,7 +203,7 @@ func TestAccAWSRoute_doesNotCrashWithVPCEndpoint(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSRouteDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSRouteWithVPCEndpoint,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSRouteExists("aws_route.bar", &route),
|
||||
|
@ -192,6 +229,7 @@ func testAccCheckAWSRouteExists(n string, res *ec2.Route) resource.TestCheckFunc
|
|||
conn,
|
||||
rs.Primary.Attributes["route_table_id"],
|
||||
rs.Primary.Attributes["destination_cidr_block"],
|
||||
rs.Primary.Attributes["destination_ipv6_cidr_block"],
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
|
@ -219,6 +257,7 @@ func testAccCheckAWSRouteDestroy(s *terraform.State) error {
|
|||
conn,
|
||||
rs.Primary.Attributes["route_table_id"],
|
||||
rs.Primary.Attributes["destination_cidr_block"],
|
||||
rs.Primary.Attributes["destination_ipv6_cidr_block"],
|
||||
)
|
||||
|
||||
if route == nil && err == nil {
|
||||
|
@ -249,6 +288,29 @@ resource "aws_route" "bar" {
|
|||
}
|
||||
`)
|
||||
|
||||
var testAccAWSRouteConfigIpv6 = fmt.Sprintf(`
|
||||
resource "aws_vpc" "foo" {
|
||||
cidr_block = "10.1.0.0/16"
|
||||
assign_generated_ipv6_cidr_block = true
|
||||
}
|
||||
|
||||
resource "aws_egress_only_internet_gateway" "foo" {
|
||||
vpc_id = "${aws_vpc.foo.id}"
|
||||
}
|
||||
|
||||
resource "aws_route_table" "foo" {
|
||||
vpc_id = "${aws_vpc.foo.id}"
|
||||
}
|
||||
|
||||
resource "aws_route" "bar" {
|
||||
route_table_id = "${aws_route_table.foo.id}"
|
||||
destination_ipv6_cidr_block = "::/0"
|
||||
egress_only_gateway_id = "${aws_egress_only_internet_gateway.foo.id}"
|
||||
}
|
||||
|
||||
|
||||
`)
|
||||
|
||||
var testAccAWSRouteBasicConfigChangeCidr = fmt.Sprint(`
|
||||
resource "aws_vpc" "foo" {
|
||||
cidr_block = "10.1.0.0/16"
|
||||
|
|
|
@@ -319,7 +319,7 @@ func resourceAwsS3BucketObjectDelete(d *schema.ResourceData, meta interface{}) e
 }
 _, err := s3conn.DeleteObject(&input)
 if err != nil {
-return fmt.Errorf("Error deleting S3 bucket object: %s", err)
+return fmt.Errorf("Error deleting S3 bucket object: %s Bucket: %q Object: %q", err, bucket, key)
 }
 }

@ -28,7 +28,7 @@ func resourceAwsSecurityGroup() *schema.Resource {
|
|||
},
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"name": &schema.Schema{
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
|
@ -44,7 +44,7 @@ func resourceAwsSecurityGroup() *schema.Resource {
|
|||
},
|
||||
},
|
||||
|
||||
"name_prefix": &schema.Schema{
|
||||
"name_prefix": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
|
@ -58,7 +58,7 @@ func resourceAwsSecurityGroup() *schema.Resource {
|
|||
},
|
||||
},
|
||||
|
||||
"description": &schema.Schema{
|
||||
"description": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
|
@ -73,49 +73,55 @@ func resourceAwsSecurityGroup() *schema.Resource {
|
|||
},
|
||||
},
|
||||
|
||||
"vpc_id": &schema.Schema{
|
||||
"vpc_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"ingress": &schema.Schema{
|
||||
"ingress": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"from_port": &schema.Schema{
|
||||
"from_port": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"to_port": &schema.Schema{
|
||||
"to_port": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"protocol": &schema.Schema{
|
||||
"protocol": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
StateFunc: protocolStateFunc,
|
||||
},
|
||||
|
||||
"cidr_blocks": &schema.Schema{
|
||||
"cidr_blocks": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
|
||||
"security_groups": &schema.Schema{
|
||||
"ipv6_cidr_blocks": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
|
||||
"security_groups": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
|
||||
"self": &schema.Schema{
|
||||
"self": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: false,
|
||||
|
@ -125,48 +131,54 @@ func resourceAwsSecurityGroup() *schema.Resource {
|
|||
Set: resourceAwsSecurityGroupRuleHash,
|
||||
},
|
||||
|
||||
"egress": &schema.Schema{
|
||||
"egress": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"from_port": &schema.Schema{
|
||||
"from_port": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"to_port": &schema.Schema{
|
||||
"to_port": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"protocol": &schema.Schema{
|
||||
"protocol": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
StateFunc: protocolStateFunc,
|
||||
},
|
||||
|
||||
"cidr_blocks": &schema.Schema{
|
||||
"cidr_blocks": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
|
||||
"prefix_list_ids": &schema.Schema{
|
||||
"ipv6_cidr_blocks": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
|
||||
"security_groups": &schema.Schema{
|
||||
"prefix_list_ids": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
|
||||
"security_groups": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
|
||||
"self": &schema.Schema{
|
||||
"self": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: false,
|
||||
|
@ -176,7 +188,7 @@ func resourceAwsSecurityGroup() *schema.Resource {
|
|||
Set: resourceAwsSecurityGroupRuleHash,
|
||||
},
|
||||
|
||||
"owner_id": &schema.Schema{
|
||||
"owner_id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
@ -252,11 +264,11 @@ func resourceAwsSecurityGroupCreate(d *schema.ResourceData, meta interface{}) er
|
|||
req := &ec2.RevokeSecurityGroupEgressInput{
|
||||
GroupId: createResp.GroupId,
|
||||
IpPermissions: []*ec2.IpPermission{
|
||||
&ec2.IpPermission{
|
||||
{
|
||||
FromPort: aws.Int64(int64(0)),
|
||||
ToPort: aws.Int64(int64(0)),
|
||||
IpRanges: []*ec2.IpRange{
|
||||
&ec2.IpRange{
|
||||
{
|
||||
CidrIp: aws.String("0.0.0.0/0"),
|
||||
},
|
||||
},
|
||||
|
@ -412,6 +424,18 @@ func resourceAwsSecurityGroupRuleHash(v interface{}) int {
|
|||
buf.WriteString(fmt.Sprintf("%s-", v))
|
||||
}
|
||||
}
|
||||
if v, ok := m["ipv6_cidr_blocks"]; ok {
|
||||
vs := v.([]interface{})
|
||||
s := make([]string, len(vs))
|
||||
for i, raw := range vs {
|
||||
s[i] = raw.(string)
|
||||
}
|
||||
sort.Strings(s)
|
||||
|
||||
for _, v := range s {
|
||||
buf.WriteString(fmt.Sprintf("%s-", v))
|
||||
}
|
||||
}
|
||||
if v, ok := m["prefix_list_ids"]; ok {
|
||||
vs := v.([]interface{})
|
||||
s := make([]string, len(vs))
|
||||
|
@ -476,6 +500,20 @@ func resourceAwsSecurityGroupIPPermGather(groupId string, permissions []*ec2.IpP
|
|||
m["cidr_blocks"] = list
|
||||
}
|
||||
|
||||
if len(perm.Ipv6Ranges) > 0 {
|
||||
raw, ok := m["ipv6_cidr_blocks"]
|
||||
if !ok {
|
||||
raw = make([]string, 0, len(perm.Ipv6Ranges))
|
||||
}
|
||||
list := raw.([]string)
|
||||
|
||||
for _, ip := range perm.Ipv6Ranges {
|
||||
list = append(list, *ip.CidrIpv6)
|
||||
}
|
||||
|
||||
m["ipv6_cidr_blocks"] = list
|
||||
}
|
||||
|
||||
if len(perm.PrefixListIds) > 0 {
|
||||
raw, ok := m["prefix_list_ids"]
|
||||
if !ok {
|
||||
|
@ -699,8 +737,9 @@ func matchRules(rType string, local []interface{}, remote []map[string]interface
|
|||
// local rule we're examining
|
||||
rHash := idHash(rType, r["protocol"].(string), r["to_port"].(int64), r["from_port"].(int64), remoteSelfVal)
|
||||
if rHash == localHash {
|
||||
var numExpectedCidrs, numExpectedPrefixLists, numExpectedSGs, numRemoteCidrs, numRemotePrefixLists, numRemoteSGs int
|
||||
var numExpectedCidrs, numExpectedIpv6Cidrs, numExpectedPrefixLists, numExpectedSGs, numRemoteCidrs, numRemoteIpv6Cidrs, numRemotePrefixLists, numRemoteSGs int
|
||||
var matchingCidrs []string
|
||||
var matchingIpv6Cidrs []string
|
||||
var matchingSGs []string
|
||||
var matchingPrefixLists []string
|
||||
|
||||
|
@ -710,6 +749,10 @@ func matchRules(rType string, local []interface{}, remote []map[string]interface
|
|||
if ok {
|
||||
numExpectedCidrs = len(l["cidr_blocks"].([]interface{}))
|
||||
}
|
||||
liRaw, ok := l["ipv6_cidr_blocks"]
|
||||
if ok {
|
||||
numExpectedIpv6Cidrs = len(l["ipv6_cidr_blocks"].([]interface{}))
|
||||
}
|
||||
lpRaw, ok := l["prefix_list_ids"]
|
||||
if ok {
|
||||
numExpectedPrefixLists = len(l["prefix_list_ids"].([]interface{}))
|
||||
|
@ -723,6 +766,10 @@ func matchRules(rType string, local []interface{}, remote []map[string]interface
|
|||
if ok {
|
||||
numRemoteCidrs = len(r["cidr_blocks"].([]string))
|
||||
}
|
||||
riRaw, ok := r["ipv6_cidr_blocks"]
|
||||
if ok {
|
||||
numRemoteIpv6Cidrs = len(r["ipv6_cidr_blocks"].([]string))
|
||||
}
|
||||
rpRaw, ok := r["prefix_list_ids"]
|
||||
if ok {
|
||||
numRemotePrefixLists = len(r["prefix_list_ids"].([]string))
|
||||
|
@ -738,6 +785,10 @@ func matchRules(rType string, local []interface{}, remote []map[string]interface
|
|||
log.Printf("[DEBUG] Local rule has more CIDR blocks, continuing (%d/%d)", numExpectedCidrs, numRemoteCidrs)
|
||||
continue
|
||||
}
|
||||
if numExpectedIpv6Cidrs > numRemoteIpv6Cidrs {
|
||||
log.Printf("[DEBUG] Local rule has more IPV6 CIDR blocks, continuing (%d/%d)", numExpectedIpv6Cidrs, numRemoteIpv6Cidrs)
|
||||
continue
|
||||
}
|
||||
if numExpectedPrefixLists > numRemotePrefixLists {
|
||||
log.Printf("[DEBUG] Local rule has more prefix lists, continuing (%d/%d)", numExpectedPrefixLists, numRemotePrefixLists)
|
||||
continue
|
||||
|
@ -775,6 +826,29 @@ func matchRules(rType string, local []interface{}, remote []map[string]interface
|
|||
}
|
||||
}
|
||||
|
||||
//IPV6 CIDRs
|
||||
var localIpv6Cidrs []interface{}
|
||||
if liRaw != nil {
|
||||
localIpv6Cidrs = liRaw.([]interface{})
|
||||
}
|
||||
localIpv6CidrSet := schema.NewSet(schema.HashString, localIpv6Cidrs)
|
||||
|
||||
var remoteIpv6Cidrs []string
|
||||
if riRaw != nil {
|
||||
remoteIpv6Cidrs = riRaw.([]string)
|
||||
}
|
||||
var listIpv6 []interface{}
|
||||
for _, s := range remoteIpv6Cidrs {
|
||||
listIpv6 = append(listIpv6, s)
|
||||
}
|
||||
remoteIpv6CidrSet := schema.NewSet(schema.HashString, listIpv6)
|
||||
|
||||
for _, s := range localIpv6CidrSet.List() {
|
||||
if remoteIpv6CidrSet.Contains(s) {
|
||||
matchingIpv6Cidrs = append(matchingIpv6Cidrs, s.(string))
|
||||
}
|
||||
}
|
||||
|
||||
// match prefix lists by converting both to sets, and using Set methods
|
||||
var localPrefixLists []interface{}
|
||||
if lpRaw != nil {
|
||||
|
@ -830,73 +904,93 @@ func matchRules(rType string, local []interface{}, remote []map[string]interface
|
|||
// match, and then remove those elements from the remote rule, so that
|
||||
// this remote rule can still be considered by other local rules
|
||||
if numExpectedCidrs == len(matchingCidrs) {
|
||||
if numExpectedPrefixLists == len(matchingPrefixLists) {
|
||||
if numExpectedSGs == len(matchingSGs) {
|
||||
// confirm that self references match
|
||||
var lSelf bool
|
||||
var rSelf bool
|
||||
if _, ok := l["self"]; ok {
|
||||
lSelf = l["self"].(bool)
|
||||
}
|
||||
if _, ok := r["self"]; ok {
|
||||
rSelf = r["self"].(bool)
|
||||
}
|
||||
if rSelf == lSelf {
|
||||
delete(r, "self")
|
||||
// pop local cidrs from remote
|
||||
diffCidr := remoteCidrSet.Difference(localCidrSet)
|
||||
var newCidr []string
|
||||
for _, cRaw := range diffCidr.List() {
|
||||
newCidr = append(newCidr, cRaw.(string))
|
||||
if numExpectedIpv6Cidrs == len(matchingIpv6Cidrs) {
|
||||
if numExpectedPrefixLists == len(matchingPrefixLists) {
|
||||
if numExpectedSGs == len(matchingSGs) {
|
||||
// confirm that self references match
|
||||
var lSelf bool
|
||||
var rSelf bool
|
||||
if _, ok := l["self"]; ok {
|
||||
lSelf = l["self"].(bool)
|
||||
}
|
||||
|
||||
// reassigning
|
||||
if len(newCidr) > 0 {
|
||||
r["cidr_blocks"] = newCidr
|
||||
} else {
|
||||
delete(r, "cidr_blocks")
|
||||
if _, ok := r["self"]; ok {
|
||||
rSelf = r["self"].(bool)
|
||||
}
|
||||
if rSelf == lSelf {
|
||||
delete(r, "self")
|
||||
// pop local cidrs from remote
|
||||
diffCidr := remoteCidrSet.Difference(localCidrSet)
|
||||
var newCidr []string
|
||||
for _, cRaw := range diffCidr.List() {
|
||||
newCidr = append(newCidr, cRaw.(string))
|
||||
}
|
||||
|
||||
// pop local prefix lists from remote
|
||||
diffPrefixLists := remotePrefixListsSet.Difference(localPrefixListsSet)
|
||||
var newPrefixLists []string
|
||||
for _, pRaw := range diffPrefixLists.List() {
|
||||
newPrefixLists = append(newPrefixLists, pRaw.(string))
|
||||
// reassigning
|
||||
if len(newCidr) > 0 {
|
||||
r["cidr_blocks"] = newCidr
|
||||
} else {
|
||||
delete(r, "cidr_blocks")
|
||||
}
|
||||
|
||||
//// IPV6
|
||||
//// Comparison
|
||||
diffIpv6Cidr := remoteIpv6CidrSet.Difference(localIpv6CidrSet)
|
||||
var newIpv6Cidr []string
|
||||
for _, cRaw := range diffIpv6Cidr.List() {
|
||||
newIpv6Cidr = append(newIpv6Cidr, cRaw.(string))
|
||||
}
|
||||
|
||||
// reassigning
|
||||
if len(newIpv6Cidr) > 0 {
|
||||
r["ipv6_cidr_blocks"] = newIpv6Cidr
|
||||
} else {
|
||||
delete(r, "ipv6_cidr_blocks")
|
||||
}
|
||||
|
||||
// pop local prefix lists from remote
|
||||
diffPrefixLists := remotePrefixListsSet.Difference(localPrefixListsSet)
|
||||
var newPrefixLists []string
|
||||
for _, pRaw := range diffPrefixLists.List() {
|
||||
newPrefixLists = append(newPrefixLists, pRaw.(string))
|
||||
}
|
||||
|
||||
// reassigning
|
||||
if len(newPrefixLists) > 0 {
|
||||
r["prefix_list_ids"] = newPrefixLists
|
||||
} else {
|
||||
delete(r, "prefix_list_ids")
|
||||
}
|
||||
|
||||
// pop local sgs from remote
|
||||
diffSGs := remoteSGSet.Difference(localSGSet)
|
||||
if len(diffSGs.List()) > 0 {
|
||||
r["security_groups"] = diffSGs
|
||||
} else {
|
||||
delete(r, "security_groups")
|
||||
}
|
||||
|
||||
saves = append(saves, l)
|
||||
}
|
||||
|
||||
// reassigning
|
||||
if len(newPrefixLists) > 0 {
|
||||
r["prefix_list_ids"] = newPrefixLists
|
||||
} else {
|
||||
delete(r, "prefix_list_ids")
|
||||
}
|
||||
|
||||
// pop local sgs from remote
|
||||
diffSGs := remoteSGSet.Difference(localSGSet)
|
||||
if len(diffSGs.List()) > 0 {
|
||||
r["security_groups"] = diffSGs
|
||||
} else {
|
||||
delete(r, "security_groups")
|
||||
}
|
||||
|
||||
saves = append(saves, l)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}

	// Here we catch any remote rules that have not been stripped of all self,
	// cidrs, and security groups. We'll add remote rules here that have not been
	// matched locally, and let the graph sort things out. This will happen when
	// rules are added externally to Terraform
	for _, r := range remote {
		var lenCidr, lenIpv6Cidr, lenPrefixLists, lenSGs int
		if rCidrs, ok := r["cidr_blocks"]; ok {
			lenCidr = len(rCidrs.([]string))
		}
		if rIpv6Cidrs, ok := r["ipv6_cidr_blocks"]; ok {
			lenIpv6Cidr = len(rIpv6Cidrs.([]string))
		}
		if rPrefixLists, ok := r["prefix_list_ids"]; ok {
			lenPrefixLists = len(rPrefixLists.([]string))
		}

@ -910,7 +1004,7 @@ func matchRules(rType string, local []interface{}, remote []map[string]interface
			}
		}

		if lenSGs+lenCidr+lenIpv6Cidr+lenPrefixLists > 0 {
			log.Printf("[DEBUG] Found a remote Rule that wasn't empty: (%#v)", r)
			saves = append(saves, r)
		}
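
The matching above leans on set differences: every CIDR, prefix list, or security group that the local config also declares is popped from the remote rule, so only additions made outside Terraform survive into saves. A standalone sketch of that idea, using plain string slices in place of the provider's schema.Set (the CIDR values are made up):

package main

import "fmt"

// difference returns the members of remote that local does not declare,
// mirroring the remoteCidrSet.Difference(localCidrSet) calls above.
func difference(remote, local []string) []string {
	declared := make(map[string]bool, len(local))
	for _, c := range local {
		declared[c] = true
	}
	var out []string
	for _, c := range remote {
		if !declared[c] {
			out = append(out, c)
		}
	}
	return out
}

func main() {
	remote := []string{"10.0.0.0/8", "192.168.0.0/16", "::/0"}
	local := []string{"10.0.0.0/8", "::/0"}
	fmt.Println(difference(remote, local)) // prints [192.168.0.0/16]
}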

@ -1003,15 +1097,15 @@ func deleteLingeringLambdaENIs(conn *ec2.EC2, d *schema.ResourceData) error {
	// Here we carefully find the offenders
	params := &ec2.DescribeNetworkInterfacesInput{
		Filters: []*ec2.Filter{
			{
				Name:   aws.String("group-id"),
				Values: []*string{aws.String(d.Id())},
			},
			{
				Name:   aws.String("description"),
				Values: []*string{aws.String("AWS Lambda VPC ENI: *")},
			},
			{
				Name:   aws.String("requester-id"),
				Values: []*string{aws.String("*:awslambda_*")},
			},
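
The hunk above narrows DescribeNetworkInterfaces with three filters: the security group id, the Lambda ENI description, and the awslambda requester id. A compilable sketch of issuing the same query on its own, outside the provider, assuming the aws-sdk-go v1 client and a placeholder group id ("sg-12345678"):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// A default session picks region and credentials up from the environment.
	conn := ec2.New(session.Must(session.NewSession()))

	// The same three filters the provider uses to find ENIs that Lambda
	// created for a given security group.
	params := &ec2.DescribeNetworkInterfacesInput{
		Filters: []*ec2.Filter{
			{Name: aws.String("group-id"), Values: []*string{aws.String("sg-12345678")}},
			{Name: aws.String("description"), Values: []*string{aws.String("AWS Lambda VPC ENI: *")}},
			{Name: aws.String("requester-id"), Values: []*string{aws.String("*:awslambda_*")}},
		},
	}

	resp, err := conn.DescribeNetworkInterfaces(params)
	if err != nil {
		log.Fatal(err)
	}
	for _, ni := range resp.NetworkInterfaces {
		fmt.Println(aws.StringValue(ni.NetworkInterfaceId))
	}
}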

@ -61,6 +61,13 @@ func resourceAwsSecurityGroupRule() *schema.Resource {
				Elem:     &schema.Schema{Type: schema.TypeString},
			},

			"ipv6_cidr_blocks": {
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},

			"prefix_list_ids": {
				Type:     schema.TypeList,
				Optional: true,
@ -400,6 +407,19 @@ func findRuleMatch(p *ec2.IpPermission, rules []*ec2.IpPermission, isVPC bool) *
|
|||
continue
|
||||
}
|
||||
|
||||
remaining = len(p.Ipv6Ranges)
|
||||
for _, ipv6 := range p.Ipv6Ranges {
|
||||
for _, ipv6ip := range r.Ipv6Ranges {
|
||||
if *ipv6.CidrIpv6 == *ipv6ip.CidrIpv6 {
|
||||
remaining--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if remaining > 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
remaining = len(p.PrefixListIds)
|
||||
for _, pl := range p.PrefixListIds {
|
||||
for _, rpl := range r.PrefixListIds {
|
||||
|
@ -463,6 +483,18 @@ func ipPermissionIDHash(sg_id, ruleType string, ip *ec2.IpPermission) string {
|
|||
}
|
||||
}
|
||||
|
||||
if len(ip.Ipv6Ranges) > 0 {
|
||||
s := make([]string, len(ip.Ipv6Ranges))
|
||||
for i, r := range ip.Ipv6Ranges {
|
||||
s[i] = *r.CidrIpv6
|
||||
}
|
||||
sort.Strings(s)
|
||||
|
||||
for _, v := range s {
|
||||
buf.WriteString(fmt.Sprintf("%s-", v))
|
||||
}
|
||||
}
|
||||
|
||||
if len(ip.PrefixListIds) > 0 {
|
||||
s := make([]string, len(ip.PrefixListIds))
|
||||
for i, pl := range ip.PrefixListIds {
|
||||
|
@ -555,6 +587,18 @@ func expandIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup) (*ec2.IpPermiss
|
|||
}
|
||||
}
|
||||
|
||||
if raw, ok := d.GetOk("ipv6_cidr_blocks"); ok {
|
||||
list := raw.([]interface{})
|
||||
perm.Ipv6Ranges = make([]*ec2.Ipv6Range, len(list))
|
||||
for i, v := range list {
|
||||
cidrIP, ok := v.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("empty element found in ipv6_cidr_blocks - consider using the compact function")
|
||||
}
|
||||
perm.Ipv6Ranges[i] = &ec2.Ipv6Range{CidrIpv6: aws.String(cidrIP)}
|
||||
}
|
||||
}
|
||||
|
||||
if raw, ok := d.GetOk("prefix_list_ids"); ok {
|
||||
list := raw.([]interface{})
|
||||
perm.PrefixListIds = make([]*ec2.PrefixListId, len(list))
|
||||
|
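
The ipv6_cidr_blocks handling added to expandIPPerm above takes the config value as a []interface{} of strings and rewraps each entry for the EC2 API, rejecting elements that are not strings. A self-contained sketch of that conversion, with a plain struct standing in for ec2.Ipv6Range and illustrative CIDRs:

package main

import "fmt"

// ipv6Range stands in for the SDK's ec2.Ipv6Range in this sketch.
type ipv6Range struct{ CidrIpv6 *string }

// expandIpv6CidrBlocks mirrors the loop above: fail on elements that are not
// strings, otherwise wrap each CIDR for the API call.
func expandIpv6CidrBlocks(raw []interface{}) ([]*ipv6Range, error) {
	out := make([]*ipv6Range, len(raw))
	for i, v := range raw {
		cidr, ok := v.(string)
		if !ok {
			return nil, fmt.Errorf("empty element found in ipv6_cidr_blocks - consider using the compact function")
		}
		c := cidr
		out[i] = &ipv6Range{CidrIpv6: &c}
	}
	return out, nil
}

func main() {
	ranges, err := expandIpv6CidrBlocks([]interface{}{"::/0", "2001:db8::/32"})
	if err != nil {
		panic(err)
	}
	for _, r := range ranges {
		fmt.Println(*r.CidrIpv6) // ::/0 then 2001:db8::/32
	}
}
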
@ -584,6 +628,12 @@ func setFromIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup, rule *ec2.IpPe
|
|||
|
||||
d.Set("cidr_blocks", cb)
|
||||
|
||||
var ipv6 []string
|
||||
for _, ip := range rule.Ipv6Ranges {
|
||||
ipv6 = append(ipv6, *ip.CidrIpv6)
|
||||
}
|
||||
d.Set("ipv6_cidr_blocks", ipv6)
|
||||
|
||||
var pl []string
|
||||
for _, p := range rule.PrefixListIds {
|
||||
pl = append(pl, *p.PrefixListId)
|
||||
|
@ -603,15 +653,16 @@ func setFromIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup, rule *ec2.IpPe
|
|||
return nil
|
||||
}
|
||||
|
||||
// Validates that either 'cidr_blocks', 'self', or 'source_security_group_id' is set
|
||||
// Validates that either 'cidr_blocks', 'ipv6_cidr_blocks', 'self', or 'source_security_group_id' is set
|
||||
func validateAwsSecurityGroupRule(d *schema.ResourceData) error {
|
||||
_, blocksOk := d.GetOk("cidr_blocks")
|
||||
_, ipv6Ok := d.GetOk("ipv6_cidr_blocks")
|
||||
_, sourceOk := d.GetOk("source_security_group_id")
|
||||
_, selfOk := d.GetOk("self")
|
||||
_, prefixOk := d.GetOk("prefix_list_ids")
|
||||
if !blocksOk && !sourceOk && !selfOk && !prefixOk {
|
||||
if !blocksOk && !sourceOk && !selfOk && !prefixOk && !ipv6Ok {
|
||||
return fmt.Errorf(
|
||||
"One of ['cidr_blocks', 'self', 'source_security_group_id', 'prefix_list_ids'] must be set to create an AWS Security Group Rule")
|
||||
"One of ['cidr_blocks', 'ipv6_cidr_blocks', 'self', 'source_security_group_id', 'prefix_list_ids'] must be set to create an AWS Security Group Rule")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -52,15 +52,15 @@ func TestIpPermissionIDHash(t *testing.T) {
|
|||
FromPort: aws.Int64(int64(80)),
|
||||
ToPort: aws.Int64(int64(8000)),
|
||||
UserIdGroupPairs: []*ec2.UserIdGroupPair{
|
||||
&ec2.UserIdGroupPair{
|
||||
{
|
||||
UserId: aws.String("987654321"),
|
||||
GroupId: aws.String("sg-12345678"),
|
||||
},
|
||||
&ec2.UserIdGroupPair{
|
||||
{
|
||||
UserId: aws.String("123456789"),
|
||||
GroupId: aws.String("sg-987654321"),
|
||||
},
|
||||
&ec2.UserIdGroupPair{
|
||||
{
|
||||
UserId: aws.String("123456789"),
|
||||
GroupId: aws.String("sg-12345678"),
|
||||
},
|
||||
|
@ -72,15 +72,15 @@ func TestIpPermissionIDHash(t *testing.T) {
|
|||
FromPort: aws.Int64(int64(80)),
|
||||
ToPort: aws.Int64(int64(8000)),
|
||||
UserIdGroupPairs: []*ec2.UserIdGroupPair{
|
||||
&ec2.UserIdGroupPair{
|
||||
{
|
||||
UserId: aws.String("987654321"),
|
||||
GroupName: aws.String("my-security-group"),
|
||||
},
|
||||
&ec2.UserIdGroupPair{
|
||||
{
|
||||
UserId: aws.String("123456789"),
|
||||
GroupName: aws.String("my-security-group"),
|
||||
},
|
||||
&ec2.UserIdGroupPair{
|
||||
{
|
||||
UserId: aws.String("123456789"),
|
||||
GroupName: aws.String("my-other-security-group"),
|
||||
},
|
||||
|
@ -183,6 +183,46 @@ func TestAccAWSSecurityGroupRule_Ingress_Protocol(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestAccAWSSecurityGroupRule_Ingress_Ipv6(t *testing.T) {
|
||||
var group ec2.SecurityGroup
|
||||
|
||||
testRuleCount := func(*terraform.State) error {
|
||||
if len(group.IpPermissions) != 1 {
|
||||
return fmt.Errorf("Wrong Security Group rule count, expected %d, got %d",
|
||||
1, len(group.IpPermissions))
|
||||
}
|
||||
|
||||
rule := group.IpPermissions[0]
|
||||
if *rule.FromPort != int64(80) {
|
||||
return fmt.Errorf("Wrong Security Group port setting, expected %d, got %d",
|
||||
80, int(*rule.FromPort))
|
||||
}
|
||||
|
||||
ipv6Address := rule.Ipv6Ranges[0]
|
||||
if *ipv6Address.CidrIpv6 != "::/0" {
|
||||
return fmt.Errorf("Wrong Security Group IPv6 address, expected %s, got %s",
|
||||
"::/0", *ipv6Address.CidrIpv6)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupRuleIngress_ipv6Config,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group),
|
||||
testRuleCount,
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSSecurityGroupRule_Ingress_Classic(t *testing.T) {
|
||||
var group ec2.SecurityGroup
|
||||
rInt := acctest.RandInt()
|
||||
|
@ -376,7 +416,7 @@ func TestAccAWSSecurityGroupRule_PartialMatching_Source(t *testing.T) {
|
|||
ToPort: aws.Int64(80),
|
||||
IpProtocol: aws.String("tcp"),
|
||||
UserIdGroupPairs: []*ec2.UserIdGroupPair{
|
||||
&ec2.UserIdGroupPair{GroupId: nat.GroupId},
|
||||
{GroupId: nat.GroupId},
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -696,6 +736,34 @@ func testAccAWSSecurityGroupRuleIngressConfig(rInt int) string {
|
|||
}`, rInt)
|
||||
}
|
||||
|
||||
const testAccAWSSecurityGroupRuleIngress_ipv6Config = `
|
||||
resource "aws_vpc" "tftest" {
|
||||
cidr_block = "10.0.0.0/16"
|
||||
|
||||
tags {
|
||||
Name = "tf-testing"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_security_group" "web" {
|
||||
vpc_id = "${aws_vpc.tftest.id}"
|
||||
|
||||
tags {
|
||||
Name = "tf-acc-test"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "ingress_1" {
|
||||
type = "ingress"
|
||||
protocol = "6"
|
||||
from_port = 80
|
||||
to_port = 8000
|
||||
ipv6_cidr_blocks = ["::/0"]
|
||||
|
||||
security_group_id = "${aws_security_group.web.id}"
|
||||
}
|
||||
`
|
||||
|
||||
const testAccAWSSecurityGroupRuleIngress_protocolConfig = `
|
||||
resource "aws_vpc" "tftest" {
|
||||
cidr_block = "10.0.0.0/16"
|
||||
|
|
|
@ -135,54 +135,54 @@ func TestProtocolForValue(t *testing.T) {
|
|||
|
||||
func TestResourceAwsSecurityGroupIPPermGather(t *testing.T) {
|
||||
raw := []*ec2.IpPermission{
|
||||
&ec2.IpPermission{
|
||||
{
|
||||
IpProtocol: aws.String("tcp"),
|
||||
FromPort: aws.Int64(int64(1)),
|
||||
ToPort: aws.Int64(int64(-1)),
|
||||
IpRanges: []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("0.0.0.0/0")}},
|
||||
IpRanges: []*ec2.IpRange{{CidrIp: aws.String("0.0.0.0/0")}},
|
||||
UserIdGroupPairs: []*ec2.UserIdGroupPair{
|
||||
&ec2.UserIdGroupPair{
|
||||
{
|
||||
GroupId: aws.String("sg-11111"),
|
||||
},
|
||||
},
|
||||
},
|
||||
&ec2.IpPermission{
|
||||
{
|
||||
IpProtocol: aws.String("tcp"),
|
||||
FromPort: aws.Int64(int64(80)),
|
||||
ToPort: aws.Int64(int64(80)),
|
||||
UserIdGroupPairs: []*ec2.UserIdGroupPair{
|
||||
// VPC
|
||||
&ec2.UserIdGroupPair{
|
||||
{
|
||||
GroupId: aws.String("sg-22222"),
|
||||
},
|
||||
},
|
||||
},
|
||||
&ec2.IpPermission{
|
||||
{
|
||||
IpProtocol: aws.String("tcp"),
|
||||
FromPort: aws.Int64(int64(443)),
|
||||
ToPort: aws.Int64(int64(443)),
|
||||
UserIdGroupPairs: []*ec2.UserIdGroupPair{
|
||||
// Classic
|
||||
&ec2.UserIdGroupPair{
|
||||
{
|
||||
UserId: aws.String("12345"),
|
||||
GroupId: aws.String("sg-33333"),
|
||||
GroupName: aws.String("ec2_classic"),
|
||||
},
|
||||
&ec2.UserIdGroupPair{
|
||||
{
|
||||
UserId: aws.String("amazon-elb"),
|
||||
GroupId: aws.String("sg-d2c979d3"),
|
||||
GroupName: aws.String("amazon-elb-sg"),
|
||||
},
|
||||
},
|
||||
},
|
||||
&ec2.IpPermission{
|
||||
{
|
||||
IpProtocol: aws.String("-1"),
|
||||
FromPort: aws.Int64(int64(0)),
|
||||
ToPort: aws.Int64(int64(0)),
|
||||
PrefixListIds: []*ec2.PrefixListId{&ec2.PrefixListId{PrefixListId: aws.String("pl-12345678")}},
|
||||
PrefixListIds: []*ec2.PrefixListId{{PrefixListId: aws.String("pl-12345678")}},
|
||||
UserIdGroupPairs: []*ec2.UserIdGroupPair{
|
||||
// VPC
|
||||
&ec2.UserIdGroupPair{
|
||||
{
|
||||
GroupId: aws.String("sg-22222"),
|
||||
},
|
||||
},
|
||||
|
@ -190,14 +190,14 @@ func TestResourceAwsSecurityGroupIPPermGather(t *testing.T) {
|
|||
}
|
||||
|
||||
local := []map[string]interface{}{
|
||||
map[string]interface{}{
|
||||
{
|
||||
"protocol": "tcp",
|
||||
"from_port": int64(1),
|
||||
"to_port": int64(-1),
|
||||
"cidr_blocks": []string{"0.0.0.0/0"},
|
||||
"self": true,
|
||||
},
|
||||
map[string]interface{}{
|
||||
{
|
||||
"protocol": "tcp",
|
||||
"from_port": int64(80),
|
||||
"to_port": int64(80),
|
||||
|
@ -205,7 +205,7 @@ func TestResourceAwsSecurityGroupIPPermGather(t *testing.T) {
|
|||
"sg-22222",
|
||||
}),
|
||||
},
|
||||
map[string]interface{}{
|
||||
{
|
||||
"protocol": "tcp",
|
||||
"from_port": int64(443),
|
||||
"to_port": int64(443),
|
||||
|
@ -214,7 +214,7 @@ func TestResourceAwsSecurityGroupIPPermGather(t *testing.T) {
|
|||
"amazon-elb/amazon-elb-sg",
|
||||
}),
|
||||
},
|
||||
map[string]interface{}{
|
||||
{
|
||||
"protocol": "-1",
|
||||
"from_port": int64(0),
|
||||
"to_port": int64(0),
|
||||
|
@ -263,7 +263,7 @@ func TestAccAWSSecurityGroup_basic(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
|
||||
|
@ -288,6 +288,39 @@ func TestAccAWSSecurityGroup_basic(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestAccAWSSecurityGroup_ipv6(t *testing.T) {
|
||||
var group ec2.SecurityGroup
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
IDRefreshName: "aws_security_group.web",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfigIpv6,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_security_group.web", "name", "terraform_acceptance_test_example"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_security_group.web", "description", "Used in the terraform acceptance tests"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_security_group.web", "ingress.2293451516.protocol", "tcp"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_security_group.web", "ingress.2293451516.from_port", "80"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_security_group.web", "ingress.2293451516.to_port", "8000"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_security_group.web", "ingress.2293451516.ipv6_cidr_blocks.#", "1"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_security_group.web", "ingress.2293451516.ipv6_cidr_blocks.0", "::/0"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSSecurityGroup_tagsCreatedFirst(t *testing.T) {
|
||||
var group ec2.SecurityGroup
|
||||
|
||||
|
@ -296,7 +329,7 @@ func TestAccAWSSecurityGroup_tagsCreatedFirst(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfigForTagsOrdering,
|
||||
ExpectError: regexp.MustCompile("InvalidParameterValue"),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
|
@ -318,7 +351,7 @@ func TestAccAWSSecurityGroup_namePrefix(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupPrefixNameConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.baz", &group),
|
||||
|
@ -353,7 +386,7 @@ func TestAccAWSSecurityGroup_self(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfigSelf,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
|
||||
|
@ -393,7 +426,7 @@ func TestAccAWSSecurityGroup_vpc(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfigVpc,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
|
||||
|
@ -446,7 +479,7 @@ func TestAccAWSSecurityGroup_vpcNegOneIngress(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfigVpcNegOneIngress,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
|
||||
|
@ -488,7 +521,7 @@ func TestAccAWSSecurityGroup_vpcProtoNumIngress(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfigVpcProtoNumIngress,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
|
||||
|
@ -521,7 +554,7 @@ func TestAccAWSSecurityGroup_MultiIngress(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfigMultiIngress,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
|
||||
|
@ -540,13 +573,13 @@ func TestAccAWSSecurityGroup_Change(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfigChange,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
|
||||
|
@ -566,7 +599,7 @@ func TestAccAWSSecurityGroup_generatedName(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfig_generatedName,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
|
||||
|
@ -596,7 +629,7 @@ func TestAccAWSSecurityGroup_DefaultEgress_VPC(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfigDefaultEgress,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExistsWithoutDefault("aws_security_group.worker"),
|
||||
|
@ -616,7 +649,7 @@ func TestAccAWSSecurityGroup_DefaultEgress_Classic(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfigClassic,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
|
||||
|
@ -634,7 +667,7 @@ func TestAccAWSSecurityGroup_drift(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfig_drift(),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
|
||||
|
@ -664,7 +697,7 @@ func TestAccAWSSecurityGroup_drift_complex(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfig_drift_complex(),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
|
||||
|
@ -773,7 +806,7 @@ func testAccCheckAWSSecurityGroupAttributes(group *ec2.SecurityGroup) resource.T
|
|||
FromPort: aws.Int64(80),
|
||||
ToPort: aws.Int64(8000),
|
||||
IpProtocol: aws.String("tcp"),
|
||||
IpRanges: []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("10.0.0.0/8")}},
|
||||
IpRanges: []*ec2.IpRange{{CidrIp: aws.String("10.0.0.0/8")}},
|
||||
}
|
||||
|
||||
if *group.GroupName != "terraform_acceptance_test_example" {
|
||||
|
@ -804,7 +837,7 @@ func testAccCheckAWSSecurityGroupAttributesNegOneProtocol(group *ec2.SecurityGro
|
|||
return func(s *terraform.State) error {
|
||||
p := &ec2.IpPermission{
|
||||
IpProtocol: aws.String("-1"),
|
||||
IpRanges: []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("10.0.0.0/8")}},
|
||||
IpRanges: []*ec2.IpRange{{CidrIp: aws.String("10.0.0.0/8")}},
|
||||
}
|
||||
|
||||
if *group.GroupName != "terraform_acceptance_test_example" {
|
||||
|
@ -839,7 +872,7 @@ func TestAccAWSSecurityGroup_tags(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfigTags,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.foo", &group),
|
||||
|
@ -847,7 +880,7 @@ func TestAccAWSSecurityGroup_tags(t *testing.T) {
|
|||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfigTagsUpdate,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.foo", &group),
|
||||
|
@ -868,7 +901,7 @@ func TestAccAWSSecurityGroup_CIDRandGroups(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupCombindCIDRandGroups,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.mixed", &group),
|
||||
|
@ -887,7 +920,7 @@ func TestAccAWSSecurityGroup_ingressWithCidrAndSGs(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfig_ingressWithCidrAndSGs,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
|
||||
|
@ -913,7 +946,7 @@ func TestAccAWSSecurityGroup_ingressWithCidrAndSGs_classic(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfig_ingressWithCidrAndSGs_classic,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group),
|
||||
|
@ -938,7 +971,7 @@ func TestAccAWSSecurityGroup_egressWithPrefixList(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfigPrefixListEgress,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.egress", &group),
|
||||
|
@ -1016,21 +1049,21 @@ func testAccCheckAWSSecurityGroupPrefixListAttributes(group *ec2.SecurityGroup)
|
|||
func testAccCheckAWSSecurityGroupAttributesChanged(group *ec2.SecurityGroup) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
p := []*ec2.IpPermission{
|
||||
&ec2.IpPermission{
|
||||
{
|
||||
FromPort: aws.Int64(80),
|
||||
ToPort: aws.Int64(9000),
|
||||
IpProtocol: aws.String("tcp"),
|
||||
IpRanges: []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("10.0.0.0/8")}},
|
||||
IpRanges: []*ec2.IpRange{{CidrIp: aws.String("10.0.0.0/8")}},
|
||||
},
|
||||
&ec2.IpPermission{
|
||||
{
|
||||
FromPort: aws.Int64(80),
|
||||
ToPort: aws.Int64(8000),
|
||||
IpProtocol: aws.String("tcp"),
|
||||
IpRanges: []*ec2.IpRange{
|
||||
&ec2.IpRange{
|
||||
{
|
||||
CidrIp: aws.String("0.0.0.0/0"),
|
||||
},
|
||||
&ec2.IpRange{
|
||||
{
|
||||
CidrIp: aws.String("10.0.0.0/8"),
|
||||
},
|
||||
},
|
||||
|
@ -1109,7 +1142,7 @@ func TestAccAWSSecurityGroup_failWithDiffMismatch(t *testing.T) {
|
|||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfig_failWithDiffMismatch,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSSecurityGroupExists("aws_security_group.nat", &group),
|
||||
|
@ -1148,6 +1181,36 @@ resource "aws_security_group" "web" {
|
|||
}
|
||||
}`
|
||||
|
||||
const testAccAWSSecurityGroupConfigIpv6 = `
|
||||
resource "aws_vpc" "foo" {
|
||||
cidr_block = "10.1.0.0/16"
|
||||
}
|
||||
|
||||
resource "aws_security_group" "web" {
|
||||
name = "terraform_acceptance_test_example"
|
||||
description = "Used in the terraform acceptance tests"
|
||||
vpc_id = "${aws_vpc.foo.id}"
|
||||
|
||||
ingress {
|
||||
protocol = "6"
|
||||
from_port = 80
|
||||
to_port = 8000
|
||||
ipv6_cidr_blocks = ["::/0"]
|
||||
}
|
||||
|
||||
egress {
|
||||
protocol = "tcp"
|
||||
from_port = 80
|
||||
to_port = 8000
|
||||
ipv6_cidr_blocks = ["::/0"]
|
||||
}
|
||||
|
||||
tags {
|
||||
Name = "tf-acc-test"
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
const testAccAWSSecurityGroupConfig = `
|
||||
resource "aws_vpc" "foo" {
|
||||
cidr_block = "10.1.0.0/16"
|
||||
|
|
|
@ -2,9 +2,6 @@ package aws
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
|
@ -29,73 +26,79 @@ func resourceAwsSpotFleetRequest() *schema.Resource {
|
|||
MigrateState: resourceAwsSpotFleetRequestMigrateState,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"iam_fleet_role": &schema.Schema{
|
||||
"iam_fleet_role": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"replace_unhealthy_instances": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Default: false,
|
||||
},
|
||||
// http://docs.aws.amazon.com/sdk-for-go/api/service/ec2.html#type-SpotFleetLaunchSpecification
|
||||
// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SpotFleetLaunchSpecification.html
|
||||
"launch_specification": &schema.Schema{
|
||||
"launch_specification": {
|
||||
Type: schema.TypeSet,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"vpc_security_group_ids": &schema.Schema{
|
||||
"vpc_security_group_ids": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
"associate_public_ip_address": &schema.Schema{
|
||||
"associate_public_ip_address": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: false,
|
||||
},
|
||||
"ebs_block_device": &schema.Schema{
|
||||
"ebs_block_device": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"delete_on_termination": &schema.Schema{
|
||||
"delete_on_termination": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"device_name": &schema.Schema{
|
||||
"device_name": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"encrypted": &schema.Schema{
|
||||
"encrypted": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"iops": &schema.Schema{
|
||||
"iops": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"snapshot_id": &schema.Schema{
|
||||
"snapshot_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"volume_size": &schema.Schema{
|
||||
"volume_size": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"volume_type": &schema.Schema{
|
||||
"volume_type": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
|
@ -105,18 +108,18 @@ func resourceAwsSpotFleetRequest() *schema.Resource {
|
|||
},
|
||||
Set: hashEbsBlockDevice,
|
||||
},
|
||||
"ephemeral_block_device": &schema.Schema{
|
||||
"ephemeral_block_device": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"device_name": &schema.Schema{
|
||||
"device_name": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"virtual_name": &schema.Schema{
|
||||
"virtual_name": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
@ -124,7 +127,7 @@ func resourceAwsSpotFleetRequest() *schema.Resource {
|
|||
},
|
||||
Set: hashEphemeralBlockDevice,
|
||||
},
|
||||
"root_block_device": &schema.Schema{
|
||||
"root_block_device": {
|
||||
// TODO: This is a set because we don't support singleton
|
||||
// sub-resources today. We'll enforce that the set only ever has
|
||||
// length zero or one below. When TF gains support for
|
||||
|
@ -137,25 +140,25 @@ func resourceAwsSpotFleetRequest() *schema.Resource {
|
|||
// Termination flag on the block device mapping entry for the root
|
||||
// device volume." - bit.ly/ec2bdmap
|
||||
Schema: map[string]*schema.Schema{
|
||||
"delete_on_termination": &schema.Schema{
|
||||
"delete_on_termination": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"iops": &schema.Schema{
|
||||
"iops": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"volume_size": &schema.Schema{
|
||||
"volume_size": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"volume_type": &schema.Schema{
|
||||
"volume_type": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
|
@ -165,73 +168,74 @@ func resourceAwsSpotFleetRequest() *schema.Resource {
|
|||
},
|
||||
Set: hashRootBlockDevice,
|
||||
},
|
||||
"ebs_optimized": &schema.Schema{
|
||||
"ebs_optimized": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: false,
|
||||
},
|
||||
"iam_instance_profile": &schema.Schema{
|
||||
"iam_instance_profile": {
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Optional: true,
|
||||
},
|
||||
"ami": &schema.Schema{
|
||||
"ami": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"instance_type": &schema.Schema{
|
||||
"instance_type": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"key_name": &schema.Schema{
|
||||
"key_name": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Computed: true,
|
||||
ValidateFunc: validateSpotFleetRequestKeyName,
|
||||
},
|
||||
"monitoring": &schema.Schema{
|
||||
"monitoring": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: false,
|
||||
},
|
||||
"placement_group": &schema.Schema{
|
||||
"placement_group": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"spot_price": &schema.Schema{
|
||||
"spot_price": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"user_data": &schema.Schema{
|
||||
"user_data": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
StateFunc: func(v interface{}) string {
|
||||
switch v.(type) {
|
||||
case string:
|
||||
hash := sha1.Sum([]byte(v.(string)))
|
||||
return hex.EncodeToString(hash[:])
|
||||
return userDataHashSum(v.(string))
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
},
|
||||
},
|
||||
"weighted_capacity": &schema.Schema{
|
||||
"weighted_capacity": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"subnet_id": &schema.Schema{
|
||||
"subnet_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"availability_zone": &schema.Schema{
|
||||
"availability_zone": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
|
@ -242,48 +246,48 @@ func resourceAwsSpotFleetRequest() *schema.Resource {
|
|||
Set: hashLaunchSpecification,
|
||||
},
|
||||
// Everything on a spot fleet is ForceNew except target_capacity
|
||||
"target_capacity": &schema.Schema{
|
||||
"target_capacity": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
ForceNew: false,
|
||||
},
|
||||
"allocation_strategy": &schema.Schema{
|
||||
"allocation_strategy": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "lowestPrice",
|
||||
ForceNew: true,
|
||||
},
|
||||
"excess_capacity_termination_policy": &schema.Schema{
|
||||
"excess_capacity_termination_policy": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "Default",
|
||||
ForceNew: false,
|
||||
},
|
||||
"spot_price": &schema.Schema{
|
||||
"spot_price": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"terminate_instances_with_expiration": &schema.Schema{
|
||||
"terminate_instances_with_expiration": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"valid_from": &schema.Schema{
|
||||
"valid_from": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"valid_until": &schema.Schema{
|
||||
"valid_until": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"spot_request_state": &schema.Schema{
|
||||
"spot_request_state": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"client_token": &schema.Schema{
|
||||
"client_token": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
@ -323,8 +327,7 @@ func buildSpotFleetLaunchSpecification(d map[string]interface{}, meta interface{
|
|||
}
|
||||
|
||||
if v, ok := d["user_data"]; ok {
|
||||
opts.UserData = aws.String(
|
||||
base64Encode([]byte(v.(string))))
|
||||
opts.UserData = aws.String(base64Encode([]byte(v.(string))))
|
||||
}
|
||||
|
||||
if v, ok := d["key_name"]; ok {
|
||||
|
@ -339,21 +342,11 @@ func buildSpotFleetLaunchSpecification(d map[string]interface{}, meta interface{
|
|||
opts.WeightedCapacity = aws.Float64(wc)
|
||||
}
|
||||
|
||||
var groups []*string
|
||||
if v, ok := d["security_groups"]; ok {
|
||||
sgs := v.(*schema.Set).List()
|
||||
for _, v := range sgs {
|
||||
str := v.(string)
|
||||
groups = append(groups, aws.String(str))
|
||||
}
|
||||
}
|
||||
|
||||
var groupIds []*string
|
||||
var securityGroupIds []*string
|
||||
if v, ok := d["vpc_security_group_ids"]; ok {
|
||||
if s := v.(*schema.Set); s.Len() > 0 {
|
||||
for _, v := range s.List() {
|
||||
opts.SecurityGroups = append(opts.SecurityGroups, &ec2.GroupIdentifier{GroupId: aws.String(v.(string))})
|
||||
groupIds = append(groupIds, aws.String(v.(string)))
|
||||
securityGroupIds = append(securityGroupIds, aws.String(v.(string)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -378,11 +371,15 @@ func buildSpotFleetLaunchSpecification(d map[string]interface{}, meta interface{
|
|||
DeleteOnTermination: aws.Bool(true),
|
||||
DeviceIndex: aws.Int64(int64(0)),
|
||||
SubnetId: aws.String(subnetId.(string)),
|
||||
Groups: groupIds,
|
||||
Groups: securityGroupIds,
|
||||
}
|
||||
|
||||
opts.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{ni}
|
||||
opts.SubnetId = aws.String("")
|
||||
} else {
|
||||
for _, id := range securityGroupIds {
|
||||
opts.SecurityGroups = append(opts.SecurityGroups, &ec2.GroupIdentifier{GroupId: id})
|
||||
}
|
||||
}
|
||||
|
||||
blockDevices, err := readSpotFleetBlockDeviceMappingsFromConfig(d, conn)
|
||||
|
@ -534,6 +531,7 @@ func resourceAwsSpotFleetRequestCreate(d *schema.ResourceData, meta interface{})
|
|||
TargetCapacity: aws.Int64(int64(d.Get("target_capacity").(int))),
|
||||
ClientToken: aws.String(resource.UniqueId()),
|
||||
TerminateInstancesWithExpiration: aws.Bool(d.Get("terminate_instances_with_expiration").(bool)),
|
||||
ReplaceUnhealthyInstances: aws.Bool(d.Get("replace_unhealthy_instances").(bool)),
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("excess_capacity_termination_policy"); ok {
|
||||
|
@ -725,29 +723,26 @@ func resourceAwsSpotFleetRequestRead(d *schema.ResourceData, meta interface{}) e
|
|||
aws.TimeValue(config.ValidUntil).Format(awsAutoscalingScheduleTimeLayout))
|
||||
}
|
||||
|
||||
d.Set("replace_unhealthy_instances", config.ReplaceUnhealthyInstances)
|
||||
d.Set("launch_specification", launchSpecsToSet(config.LaunchSpecifications, conn))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func launchSpecsToSet(ls []*ec2.SpotFleetLaunchSpecification, conn *ec2.EC2) *schema.Set {
|
||||
specs := &schema.Set{F: hashLaunchSpecification}
|
||||
for _, val := range ls {
|
||||
dn, err := fetchRootDeviceName(aws.StringValue(val.ImageId), conn)
|
||||
func launchSpecsToSet(launchSpecs []*ec2.SpotFleetLaunchSpecification, conn *ec2.EC2) *schema.Set {
|
||||
specSet := &schema.Set{F: hashLaunchSpecification}
|
||||
for _, spec := range launchSpecs {
|
||||
rootDeviceName, err := fetchRootDeviceName(aws.StringValue(spec.ImageId), conn)
|
||||
if err != nil {
|
||||
log.Panic(err)
|
||||
} else {
|
||||
ls := launchSpecToMap(val, dn)
|
||||
specs.Add(ls)
|
||||
}
|
||||
|
||||
specSet.Add(launchSpecToMap(spec, rootDeviceName))
|
||||
}
|
||||
return specs
|
||||
return specSet
|
||||
}
|
||||
|
||||
func launchSpecToMap(
|
||||
l *ec2.SpotFleetLaunchSpecification,
|
||||
rootDevName *string,
|
||||
) map[string]interface{} {
|
||||
func launchSpecToMap(l *ec2.SpotFleetLaunchSpecification, rootDevName *string) map[string]interface{} {
|
||||
m := make(map[string]interface{})
|
||||
|
||||
m["root_block_device"] = rootBlockDeviceToSet(l.BlockDeviceMappings, rootDevName)
|
||||
|
@ -779,10 +774,7 @@ func launchSpecToMap(
|
|||
}
|
||||
|
||||
if l.UserData != nil {
|
||||
ud_dec, err := base64.StdEncoding.DecodeString(aws.StringValue(l.UserData))
|
||||
if err == nil {
|
||||
m["user_data"] = string(ud_dec)
|
||||
}
|
||||
m["user_data"] = userDataHashSum(aws.StringValue(l.UserData))
|
||||
}
|
||||
|
||||
if l.KeyName != nil {
|
||||
|
@ -797,11 +789,23 @@ func launchSpecToMap(
|
|||
m["subnet_id"] = aws.StringValue(l.SubnetId)
|
||||
}
|
||||
|
||||
securityGroupIds := &schema.Set{F: schema.HashString}
|
||||
if len(l.NetworkInterfaces) > 0 {
|
||||
// This resource auto-creates one network interface when associate_public_ip_address is true
|
||||
for _, group := range l.NetworkInterfaces[0].Groups {
|
||||
securityGroupIds.Add(aws.StringValue(group))
|
||||
}
|
||||
} else {
|
||||
for _, group := range l.SecurityGroups {
|
||||
securityGroupIds.Add(aws.StringValue(group.GroupId))
|
||||
}
|
||||
}
|
||||
m["vpc_security_group_ids"] = securityGroupIds
|
||||
|
||||
if l.WeightedCapacity != nil {
|
||||
m["weighted_capacity"] = strconv.FormatFloat(*l.WeightedCapacity, 'f', 0, 64)
|
||||
}
|
||||
|
||||
// m["security_groups"] = securityGroupsToSet(l.SecutiryGroups)
|
||||
return m
|
||||
}
|
||||
|
||||
|
@ -1009,7 +1013,6 @@ func hashLaunchSpecification(v interface{}) int {
|
|||
}
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["instance_type"].(string)))
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["spot_price"].(string)))
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["user_data"].(string)))
|
||||
return hashcode.String(buf.String())
|
||||
}
|
||||
|
||||
|
|
|
@ -100,9 +100,9 @@ func TestAccAWSSpotFleetRequest_lowestPriceAzInGivenList(t *testing.T) {
|
|||
resource.TestCheckResourceAttr(
|
||||
"aws_spot_fleet_request.foo", "launch_specification.#", "2"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_spot_fleet_request.foo", "launch_specification.1590006269.availability_zone", "us-west-2a"),
|
||||
"aws_spot_fleet_request.foo", "launch_specification.335709043.availability_zone", "us-west-2a"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_spot_fleet_request.foo", "launch_specification.3809475891.availability_zone", "us-west-2b"),
|
||||
"aws_spot_fleet_request.foo", "launch_specification.1671188867.availability_zone", "us-west-2b"),
|
||||
),
|
||||
},
|
||||
},
|
||||
|
@ -154,13 +154,13 @@ func TestAccAWSSpotFleetRequest_multipleInstanceTypesInSameAz(t *testing.T) {
|
|||
resource.TestCheckResourceAttr(
|
||||
"aws_spot_fleet_request.foo", "launch_specification.#", "2"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_spot_fleet_request.foo", "launch_specification.1590006269.instance_type", "m1.small"),
|
||||
"aws_spot_fleet_request.foo", "launch_specification.335709043.instance_type", "m1.small"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_spot_fleet_request.foo", "launch_specification.1590006269.availability_zone", "us-west-2a"),
|
||||
"aws_spot_fleet_request.foo", "launch_specification.335709043.availability_zone", "us-west-2a"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_spot_fleet_request.foo", "launch_specification.3079734941.instance_type", "m3.large"),
|
||||
"aws_spot_fleet_request.foo", "launch_specification.590403189.instance_type", "m3.large"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_spot_fleet_request.foo", "launch_specification.3079734941.availability_zone", "us-west-2a"),
|
||||
"aws_spot_fleet_request.foo", "launch_specification.590403189.availability_zone", "us-west-2a"),
|
||||
),
|
||||
},
|
||||
},
|
||||
|
@ -214,13 +214,13 @@ func TestAccAWSSpotFleetRequest_overriddingSpotPrice(t *testing.T) {
|
|||
resource.TestCheckResourceAttr(
|
||||
"aws_spot_fleet_request.foo", "launch_specification.#", "2"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_spot_fleet_request.foo", "launch_specification.522395050.spot_price", "0.01"),
|
||||
"aws_spot_fleet_request.foo", "launch_specification.4143232216.spot_price", "0.01"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_spot_fleet_request.foo", "launch_specification.522395050.instance_type", "m3.large"),
|
||||
"aws_spot_fleet_request.foo", "launch_specification.4143232216.instance_type", "m3.large"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_spot_fleet_request.foo", "launch_specification.1590006269.spot_price", ""), //there will not be a value here since it's not overriding
|
||||
"aws_spot_fleet_request.foo", "launch_specification.335709043.spot_price", ""), //there will not be a value here since it's not overriding
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_spot_fleet_request.foo", "launch_specification.1590006269.instance_type", "m1.small"),
|
||||
"aws_spot_fleet_request.foo", "launch_specification.335709043.instance_type", "m1.small"),
|
||||
),
|
||||
},
|
||||
},
|
||||
|
@ -289,13 +289,13 @@ func TestAccAWSSpotFleetRequest_withWeightedCapacity(t *testing.T) {
|
|||
resource.TestCheckResourceAttr(
|
||||
"aws_spot_fleet_request.foo", "launch_specification.#", "2"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_spot_fleet_request.foo", "launch_specification.2325690000.weighted_capacity", "3"),
|
||||
"aws_spot_fleet_request.foo", "launch_specification.4120185872.weighted_capacity", "3"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_spot_fleet_request.foo", "launch_specification.2325690000.instance_type", "r3.large"),
|
||||
"aws_spot_fleet_request.foo", "launch_specification.4120185872.instance_type", "r3.large"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_spot_fleet_request.foo", "launch_specification.3079734941.weighted_capacity", "6"),
|
||||
"aws_spot_fleet_request.foo", "launch_specification.590403189.weighted_capacity", "6"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_spot_fleet_request.foo", "launch_specification.3079734941.instance_type", "m3.large"),
|
||||
"aws_spot_fleet_request.foo", "launch_specification.590403189.instance_type", "m3.large"),
|
||||
),
|
||||
},
|
||||
},
|
||||
|
|
|
@ -266,6 +266,29 @@ func readInstance(d *schema.ResourceData, meta interface{}) error {
|
|||
if err := readBlockDevices(d, instance, conn); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var ipv6Addresses []string
|
||||
if len(instance.NetworkInterfaces) > 0 {
|
||||
for _, ni := range instance.NetworkInterfaces {
|
||||
if *ni.Attachment.DeviceIndex == 0 {
|
||||
d.Set("subnet_id", ni.SubnetId)
|
||||
d.Set("network_interface_id", ni.NetworkInterfaceId)
|
||||
d.Set("associate_public_ip_address", ni.Association != nil)
|
||||
d.Set("ipv6_address_count", len(ni.Ipv6Addresses))
|
||||
|
||||
for _, address := range ni.Ipv6Addresses {
|
||||
ipv6Addresses = append(ipv6Addresses, *address.Ipv6Address)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
d.Set("subnet_id", instance.SubnetId)
|
||||
d.Set("network_interface_id", "")
|
||||
}
|
||||
|
||||
if err := d.Set("ipv6_addresses", ipv6Addresses); err != nil {
|
||||
log.Printf("[WARN] Error setting ipv6_addresses for AWS Spot Instance (%s): %s", d.Id(), err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
|
@ -77,6 +77,25 @@ func resourceAwsVolumeAttachmentCreate(d *schema.ResourceData, meta interface{})
|
|||
|
||||
vols, err := conn.DescribeVolumes(request)
|
||||
if (err != nil) || (len(vols.Volumes) == 0) {
|
||||
// This handles the situation where the instance is created by
|
||||
// a spot request and whilst the request has been fulfilled the
|
||||
// instance is not running yet
|
||||
stateConf := &resource.StateChangeConf{
|
||||
Pending: []string{"pending"},
|
||||
Target: []string{"running"},
|
||||
Refresh: InstanceStateRefreshFunc(conn, iID),
|
||||
Timeout: 10 * time.Minute,
|
||||
Delay: 10 * time.Second,
|
||||
MinTimeout: 3 * time.Second,
|
||||
}
|
||||
|
||||
_, err = stateConf.WaitForState()
|
||||
if err != nil {
|
||||
return fmt.Errorf(
|
||||
"Error waiting for instance (%s) to become ready: %s",
|
||||
iID, err)
|
||||
}
|
||||
|
||||
// not attached
|
||||
opts := &ec2.AttachVolumeInput{
|
||||
Device: aws.String(name),
|
||||
|
|
|
@ -216,6 +216,12 @@ func expandIPPerms(
|
|||
perm.IpRanges = append(perm.IpRanges, &ec2.IpRange{CidrIp: aws.String(v.(string))})
|
||||
}
|
||||
}
|
||||
if raw, ok := m["ipv6_cidr_blocks"]; ok {
|
||||
list := raw.([]interface{})
|
||||
for _, v := range list {
|
||||
perm.Ipv6Ranges = append(perm.Ipv6Ranges, &ec2.Ipv6Range{CidrIpv6: aws.String(v.(string))})
|
||||
}
|
||||
}
|
||||
|
||||
if raw, ok := m["prefix_list_ids"]; ok {
|
||||
list := raw.([]interface{})
|
||||
|
|
|
@ -2,6 +2,8 @@ package aws
|
|||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
|
@ -24,3 +26,17 @@ func isBase64Encoded(data []byte) bool {
|
|||
func looksLikeJsonString(s interface{}) bool {
|
||||
return regexp.MustCompile(`^\s*{`).MatchString(s.(string))
|
||||
}
|
||||
|
||||
func jsonBytesEqual(b1, b2 []byte) bool {
|
||||
var o1 interface{}
|
||||
if err := json.Unmarshal(b1, &o1); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
var o2 interface{}
|
||||
if err := json.Unmarshal(b2, &o2); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return reflect.DeepEqual(o1, o2)
|
||||
}
|
||||
|
|
|
@ -32,3 +32,41 @@ func TestLooksLikeJsonString(t *testing.T) {
|
|||
t.Errorf("Expected looksLikeJson to return false for %s", doesNotLookLikeJson)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJsonBytesEqualQuotedAndUnquoted(t *testing.T) {
|
||||
unquoted := `{"test": "test"}`
|
||||
quoted := "{\"test\": \"test\"}"
|
||||
|
||||
if !jsonBytesEqual([]byte(unquoted), []byte(quoted)) {
|
||||
t.Errorf("Expected jsonBytesEqual to return true for %s == %s", unquoted, quoted)
|
||||
}
|
||||
|
||||
unquotedDiff := `{"test": "test"}`
|
||||
quotedDiff := "{\"test\": \"tested\"}"
|
||||
|
||||
if jsonBytesEqual([]byte(unquotedDiff), []byte(quotedDiff)) {
|
||||
t.Errorf("Expected jsonBytesEqual to return false for %s == %s", unquotedDiff, quotedDiff)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJsonBytesEqualWhitespaceAndNoWhitespace(t *testing.T) {
|
||||
noWhitespace := `{"test":"test"}`
|
||||
whitespace := `
|
||||
{
|
||||
"test": "test"
|
||||
}`
|
||||
|
||||
if !jsonBytesEqual([]byte(noWhitespace), []byte(whitespace)) {
|
||||
t.Errorf("Expected jsonBytesEqual to return true for %s == %s", noWhitespace, whitespace)
|
||||
}
|
||||
|
||||
noWhitespaceDiff := `{"test":"test"}`
|
||||
whitespaceDiff := `
|
||||
{
|
||||
"test": "tested"
|
||||
}`
|
||||
|
||||
if jsonBytesEqual([]byte(noWhitespaceDiff), []byte(whitespaceDiff)) {
|
||||
t.Errorf("Expected jsonBytesEqual to return false for %s == %s", noWhitespaceDiff, whitespaceDiff)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -930,3 +930,22 @@ func validateConfigExecutionFrequency(v interface{}, k string) (ws []string, err
|
|||
k, frequency, validFrequencies))
|
||||
return
|
||||
}
|
||||
|
||||
func validateAccountAlias(v interface{}, k string) (ws []string, es []error) {
|
||||
val := v.(string)
|
||||
|
||||
if (len(val) < 3) || (len(val) > 63) {
|
||||
es = append(es, fmt.Errorf("%q must contain from 3 to 63 alphanumeric characters or hyphens", k))
|
||||
}
|
||||
if !regexp.MustCompile("^[a-z0-9][a-z0-9-]+$").MatchString(val) {
|
||||
es = append(es, fmt.Errorf("%q must start with an alphanumeric character and only contain lowercase alphanumeric characters and hyphens", k))
|
||||
}
|
||||
if strings.Contains(val, "--") {
|
||||
es = append(es, fmt.Errorf("%q must not contain consecutive hyphens", k))
|
||||
}
|
||||
if strings.HasSuffix(val, "-") {
|
||||
es = append(es, fmt.Errorf("%q must not end in a hyphen", k))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
|
|
@ -1550,3 +1550,32 @@ func TestValidateDmsReplicationTaskId(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateAccountAlias(t *testing.T) {
|
||||
validAliases := []string{
|
||||
"tf-alias",
|
||||
"0tf-alias1",
|
||||
}
|
||||
|
||||
for _, s := range validAliases {
|
||||
_, errors := validateAccountAlias(s, "account_alias")
|
||||
if len(errors) > 0 {
|
||||
t.Fatalf("%q should be a valid account alias: %v", s, errors)
|
||||
}
|
||||
}
|
||||
|
||||
invalidAliases := []string{
|
||||
"tf",
|
||||
"-tf",
|
||||
"tf-",
|
||||
"TF-Alias",
|
||||
"tf-alias-tf-alias-tf-alias-tf-alias-tf-alias-tf-alias-tf-alias-tf-alias",
|
||||
}
|
||||
|
||||
for _, s := range invalidAliases {
|
||||
_, errors := validateAccountAlias(s, "account_alias")
|
||||
if len(errors) == 0 {
|
||||
t.Fatalf("%q should not be a valid account alias: %v", s, errors)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,6 +1,9 @@
|
|||
package azurerm
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
|
@ -191,6 +194,12 @@ func providerConfigure(p *schema.Provider) schema.ConfigureFunc {
|
|||
|
||||
client.StopContext = p.StopContext()
|
||||
|
||||
// replaces the context between tests
|
||||
p.MetaReset = func() error {
|
||||
client.StopContext = p.StopContext()
|
||||
return nil
|
||||
}
|
||||
|
||||
// List all the available providers and their registration state to avoid unnecessary
|
||||
// requests. This also lets us check if the provider credentials are correct.
|
||||
providerList, err := client.providers.List(nil, "")
|
||||
|
@ -323,3 +332,31 @@ func ignoreCaseDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool
|
|||
func ignoreCaseStateFunc(val interface{}) string {
|
||||
return strings.ToLower(val.(string))
|
||||
}
|
||||
|
||||
func userDataStateFunc(v interface{}) string {
|
||||
switch s := v.(type) {
|
||||
case string:
|
||||
s = base64Encode(s)
|
||||
hash := sha1.Sum([]byte(s))
|
||||
return hex.EncodeToString(hash[:])
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// Base64Encode encodes data if the input isn't already encoded using
|
||||
// base64.StdEncoding.EncodeToString. If the input is already base64 encoded,
|
||||
// return the original input unchanged.
|
||||
func base64Encode(data string) string {
|
||||
// Check whether the data is already Base64 encoded; don't double-encode
|
||||
if isBase64Encoded(data) {
|
||||
return data
|
||||
}
|
||||
// data has not been encoded encode and return
|
||||
return base64.StdEncoding.EncodeToString([]byte(data))
|
||||
}
|
||||
|
||||
func isBase64Encoded(data string) bool {
|
||||
_, err := base64.StdEncoding.DecodeString(data)
|
||||
return err == nil
|
||||
}
|
||||
|
|